/* bsd/nfs/nfs_socket.c (xnu-7195.50.7.100.1) */
1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1991, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
65 * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $
66 */
67
68 #include <nfs/nfs_conf.h>
69 #if CONFIG_NFS
70
71 /*
72 * Socket operations for use by nfs
73 */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/proc.h>
78 #include <sys/signalvar.h>
79 #include <sys/kauth.h>
80 #include <sys/mount_internal.h>
81 #include <sys/kernel.h>
82 #include <sys/kpi_mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/vnode.h>
85 #include <sys/domain.h>
86 #include <sys/protosw.h>
87 #include <sys/socket.h>
88 #include <sys/un.h>
89 #include <sys/syslog.h>
90 #include <sys/tprintf.h>
91 #include <libkern/OSAtomic.h>
92
93 #include <sys/time.h>
94 #include <kern/clock.h>
95 #include <kern/task.h>
96 #include <kern/thread.h>
97 #include <kern/thread_call.h>
98 #include <sys/user.h>
99 #include <sys/acct.h>
100
101 #include <netinet/in.h>
102 #include <netinet/tcp.h>
103
104 #include <nfs/rpcv2.h>
105 #include <nfs/krpc.h>
106 #include <nfs/nfsproto.h>
107 #include <nfs/nfs.h>
108 #include <nfs/xdr_subs.h>
109 #include <nfs/nfsm_subs.h>
110 #include <nfs/nfs_gss.h>
111 #include <nfs/nfsmount.h>
112 #include <nfs/nfsnode.h>
113
114 #define NFS_SOCK_DBG(...) NFS_DBG(NFS_FAC_SOCK, 7, ## __VA_ARGS__)
115 #define NFS_SOCK_DUMP_MBUF(msg, mb) if (NFS_IS_DBG(NFS_FAC_SOCK, 15)) nfs_dump_mbuf(__func__, __LINE__, (msg), (mb))
116
117 /* XXX */
118 boolean_t current_thread_aborted(void);
119 kern_return_t thread_terminate(thread_t);
120
121 ZONE_DECLARE(nfs_fhandle_zone, "fhandle", sizeof(struct fhandle), ZC_NONE);
122 ZONE_DECLARE(nfs_req_zone, "NFS req", sizeof(struct nfsreq), ZC_NONE);
123 ZONE_DECLARE(nfsrv_descript_zone, "NFSV3 srvdesc",
124 sizeof(struct nfsrv_descript), ZC_NONE);
125
126
127 #if CONFIG_NFS_SERVER
128 int nfsrv_sock_max_rec_queue_length = 128; /* max # RPC records queued on (UDP) socket */
129
130 int nfsrv_getstream(struct nfsrv_sock *, int);
131 int nfsrv_getreq(struct nfsrv_descript *);
132 extern int nfsv3_procid[NFS_NPROCS];
133 #endif /* CONFIG_NFS_SERVER */
134
135 /*
136 * compare two sockaddr structures
137 */
138 int
139 nfs_sockaddr_cmp(struct sockaddr *sa1, struct sockaddr *sa2)
140 {
141 if (!sa1) {
142 return -1;
143 }
144 if (!sa2) {
145 return 1;
146 }
147 if (sa1->sa_family != sa2->sa_family) {
148 return (sa1->sa_family < sa2->sa_family) ? -1 : 1;
149 }
150 if (sa1->sa_len != sa2->sa_len) {
151 return (sa1->sa_len < sa2->sa_len) ? -1 : 1;
152 }
153 if (sa1->sa_family == AF_INET) {
154 return bcmp(&((struct sockaddr_in*)sa1)->sin_addr,
155 &((struct sockaddr_in*)sa2)->sin_addr, sizeof(((struct sockaddr_in*)sa1)->sin_addr));
156 }
157 if (sa1->sa_family == AF_INET6) {
158 return bcmp(&((struct sockaddr_in6*)sa1)->sin6_addr,
159 &((struct sockaddr_in6*)sa2)->sin6_addr, sizeof(((struct sockaddr_in6*)sa1)->sin6_addr));
160 }
161 return -1;
162 }
163
164 #if CONFIG_NFS_CLIENT
165
166 int nfs_connect_search_new_socket(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
167 int nfs_connect_search_socket_connect(struct nfsmount *, struct nfs_socket *, int);
168 int nfs_connect_search_ping(struct nfsmount *, struct nfs_socket *, struct timeval *);
169 void nfs_connect_search_socket_found(struct nfsmount *, struct nfs_socket_search *, struct nfs_socket *);
170 void nfs_connect_search_socket_reap(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
171 int nfs_connect_search_check(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
172 int nfs_reconnect(struct nfsmount *);
173 int nfs_connect_setup(struct nfsmount *);
174 void nfs_mount_sock_thread(void *, wait_result_t);
175 void nfs_udp_rcv(socket_t, void*, int);
176 void nfs_tcp_rcv(socket_t, void*, int);
177 void nfs_sock_poke(struct nfsmount *);
178 void nfs_request_match_reply(struct nfsmount *, mbuf_t);
179 void nfs_reqdequeue(struct nfsreq *);
180 void nfs_reqbusy(struct nfsreq *);
181 struct nfsreq *nfs_reqnext(struct nfsreq *);
182 int nfs_wait_reply(struct nfsreq *);
183 void nfs_softterm(struct nfsreq *);
184 int nfs_can_squish(struct nfsmount *);
185 int nfs_is_squishy(struct nfsmount *);
186 int nfs_is_dead(int, struct nfsmount *);
187
188 /*
189 * Estimate rto for an nfs rpc sent via. an unreliable datagram.
190 * Use the mean and mean deviation of rtt for the appropriate type of rpc
191 * for the frequent rpcs and a default for the others.
192 * The justification for doing "other" this way is that these rpcs
193 * happen so infrequently that timer est. would probably be stale.
194 * Also, since many of these rpcs are
195 * non-idempotent, a conservative timeout is desired.
196 * getattr, lookup - A+2D
197 * read, write - A+4D
198 * other - nm_timeo
199 */
200 #define NFS_RTO(n, t) \
201 ((t) == 0 ? (n)->nm_timeo : \
202 ((t) < 3 ? \
203 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
204 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
205 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
206 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
207
208 /*
209 * Defines which timer to use for the procnum.
210 * 0 - default
211 * 1 - getattr
212 * 2 - lookup
213 * 3 - read
214 * 4 - write
215 */
216 static const int proct[] = {
217 [NFSPROC_NULL] = 0,
218 [NFSPROC_GETATTR] = 1,
219 [NFSPROC_SETATTR] = 0,
220 [NFSPROC_LOOKUP] = 2,
221 [NFSPROC_ACCESS] = 1,
222 [NFSPROC_READLINK] = 3,
223 [NFSPROC_READ] = 3,
224 [NFSPROC_WRITE] = 4,
225 [NFSPROC_CREATE] = 0,
226 [NFSPROC_MKDIR] = 0,
227 [NFSPROC_SYMLINK] = 0,
228 [NFSPROC_MKNOD] = 0,
229 [NFSPROC_REMOVE] = 0,
230 [NFSPROC_RMDIR] = 0,
231 [NFSPROC_RENAME] = 0,
232 [NFSPROC_LINK] = 0,
233 [NFSPROC_READDIR] = 3,
234 [NFSPROC_READDIRPLUS] = 3,
235 [NFSPROC_FSSTAT] = 0,
236 [NFSPROC_FSINFO] = 0,
237 [NFSPROC_PATHCONF] = 0,
238 [NFSPROC_COMMIT] = 0,
239 [NFSPROC_NOOP] = 0,
240 };
241
242 /*
243 * There is a congestion window for outstanding rpcs maintained per mount
244 * point. The cwnd size is adjusted in roughly the way that:
245 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
246 * SIGCOMM '88". ACM, August 1988.
247 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
248 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
249 * of rpcs is in progress.
250 * (The sent count and cwnd are scaled for integer arith.)
251 * Variants of "slow start" were tried and were found to be too much of a
252 * performance hit (ave. rtt 3 times larger),
253 * I suspect due to the large rtt that nfs rpcs have.
254 */
255 #define NFS_CWNDSCALE 256
256 #define NFS_MAXCWND (NFS_CWNDSCALE * 32)
257 static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
258
/*
 * Increment location index to next address/server/location.
 *
 * Advances *nlip to the next usable server address within the mount's
 * fs locations list: past the last address of a server it moves to the
 * first address of the next server, past the last server of a location
 * to the next location, and past the last location it wraps back to
 * the first.  Servers with zero addresses are skipped.  If the walk
 * arrives back at the exact index that was passed in (meaning no
 * server anywhere has an address), *nlip is left unchanged so we don't
 * loop forever.
 */
void
nfs_location_next(struct nfs_fs_locations *nlp, struct nfs_location_index *nlip)
{
	uint8_t loc = nlip->nli_loc;
	uint8_t serv = nlip->nli_serv;
	uint8_t addr = nlip->nli_addr;

	/* move to next address */
	addr++;
	if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
		/* no more addresses on current server, go to first address of next server */
next_server:
		addr = 0;
		serv++;
		if (serv >= nlp->nl_locations[loc]->nl_servcount) {
			/* no more servers on current location, go to first server of next location */
			serv = 0;
			loc++;
			if (loc >= nlp->nl_numlocs) {
				loc = 0; /* after last location, wrap back around to first location */
			}
		}
	}
	/*
	 * It's possible for this next server to not have any addresses.
	 * Check for that here and go to the next server.
	 * But bail out if we've managed to come back around to the original
	 * location that was passed in. (That would mean no servers had any
	 * addresses. And we don't want to spin here forever.)
	 */
	if ((loc == nlip->nli_loc) && (serv == nlip->nli_serv) && (addr == nlip->nli_addr)) {
		return;
	}
	if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
		goto next_server;
	}

	/* commit the new index */
	nlip->nli_loc = loc;
	nlip->nli_serv = serv;
	nlip->nli_addr = addr;
}
303
304 /*
305 * Compare two location indices.
306 */
307 int
308 nfs_location_index_cmp(struct nfs_location_index *nlip1, struct nfs_location_index *nlip2)
309 {
310 if (nlip1->nli_loc != nlip2->nli_loc) {
311 return nlip1->nli_loc - nlip2->nli_loc;
312 }
313 if (nlip1->nli_serv != nlip2->nli_serv) {
314 return nlip1->nli_serv - nlip2->nli_serv;
315 }
316 return nlip1->nli_addr - nlip2->nli_addr;
317 }
318
319 /*
320 * Get the mntfromname (or path portion only) for a given location.
321 */
322 void
323 nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, size_t size, int pathonly)
324 {
325 struct nfs_fs_location *fsl = locs->nl_locations[idx.nli_loc];
326 char *p;
327 int cnt, i;
328
329 p = s;
330 if (!pathonly) {
331 char *name = fsl->nl_servers[idx.nli_serv]->ns_name;
332 if (name == NULL) {
333 name = "";
334 }
335 if (*name == '\0') {
336 if (*fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr]) {
337 name = fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr];
338 }
339 cnt = scnprintf(p, size, "<%s>:", name);
340 } else {
341 cnt = scnprintf(p, size, "%s:", name);
342 }
343 p += cnt;
344 size -= cnt;
345 }
346 if (fsl->nl_path.np_compcount == 0) {
347 /* mounting root export on server */
348 if (size > 0) {
349 *p++ = '/';
350 *p++ = '\0';
351 }
352 return;
353 }
354 /* append each server path component */
355 for (i = 0; (size > 0) && (i < (int)fsl->nl_path.np_compcount); i++) {
356 cnt = scnprintf(p, size, "/%s", fsl->nl_path.np_components[i]);
357 p += cnt;
358 size -= cnt;
359 }
360 }
361
/*
 * NFS client connect socket upcall.
 * (Used only during socket connect/search.)
 *
 * Invoked by the socket layer when data arrives on a candidate socket
 * being probed by the connect search.  Reads the reply to the RPC ping
 * sent by the search code, parses the RPC reply header, and marks the
 * socket NSO_VERIFIED on success or NSO_DEAD on failure, waking the
 * thread waiting on nso_wake in either case.
 */
void
nfs_connect_upcall(socket_t so, void *arg, __unused int waitflag)
{
	struct nfs_socket *nso = arg;
	size_t rcvlen;
	mbuf_t m;
	int error = 0, recv = 1;

	/* Still connecting: just nudge the search thread to re-check. */
	if (nso->nso_flags & NSO_CONNECTING) {
		NFS_SOCK_DBG("nfs connect - socket %p upcall - connecting flags = %8.8x\n", nso, nso->nso_flags);
		wakeup(nso->nso_wake);
		return;
	}

	lck_mtx_lock(&nso->nso_lock);
	/* Bail if an upcall is already running, the socket is going away, or no ping is outstanding. */
	if ((nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) || !(nso->nso_flags & NSO_PINGING)) {
		NFS_SOCK_DBG("nfs connect - socket %p upcall - nevermind\n", nso);
		lck_mtx_unlock(&nso->nso_lock);
		return;
	}
	NFS_SOCK_DBG("nfs connect - socket %p upcall %8.8x\n", nso, nso->nso_flags);
	nso->nso_flags |= NSO_UPCALL;

	/* loop while we make error-free progress */
	while (!error && recv) {
		/* make sure we're still interested in this socket */
		if (nso->nso_flags & (NSO_DISCONNECTING | NSO_DEAD)) {
			break;
		}
		/* drop the lock around the non-blocking receive */
		lck_mtx_unlock(&nso->nso_lock);
		m = NULL;
		if (nso->nso_sotype == SOCK_STREAM) {
			/* TCP: reassemble one RPC record from the stream */
			error = nfs_rpc_record_read(so, &nso->nso_rrs, MSG_DONTWAIT, &recv, &m);
			NFS_SOCK_DBG("nfs_rpc_record_read returned %d recv = %d\n", error, recv);
		} else {
			/* UDP/local datagram: one datagram is one RPC message */
			rcvlen = 1000000;
			error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
			recv = m ? 1 : 0;
		}
		lck_mtx_lock(&nso->nso_lock);
		if (m) {
			/* match response with request */
			struct nfsm_chain nmrep;
			uint32_t reply = 0, rxid = 0, verf_type, verf_len;
			uint32_t reply_status, rejected_status, accepted_status;

			NFS_SOCK_DUMP_MBUF("Got mbuf from ping", m);
			/* Dissect the RPC reply header (see RFC 5531 reply body). */
			nfsm_chain_dissect_init(error, &nmrep, m);
			nfsm_chain_get_32(error, &nmrep, rxid);
			nfsm_chain_get_32(error, &nmrep, reply);
			if (!error && ((reply != RPC_REPLY) || (rxid != nso->nso_pingxid))) {
				error = EBADRPC;
			}
			nfsm_chain_get_32(error, &nmrep, reply_status);
			if (!error && (reply_status == RPC_MSGDENIED)) {
				nfsm_chain_get_32(error, &nmrep, rejected_status);
				if (!error) {
					error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
				}
			}
			nfsm_chain_get_32(error, &nmrep, verf_type); /* verifier flavor */
			nfsm_chain_get_32(error, &nmrep, verf_len); /* verifier length */
			nfsmout_if(error);
			if (verf_len) {
				/* skip the verifier body (padded to 4-byte boundary) */
				nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
			}
			nfsm_chain_get_32(error, &nmrep, accepted_status);
			nfsmout_if(error);
			NFS_SOCK_DBG("Recevied accepted_status of %d nso_version = %d\n", accepted_status, nso->nso_version);
			if ((accepted_status == RPC_PROGMISMATCH) && !nso->nso_version) {
				/*
				 * No version pinned yet: use the min/max versions in
				 * the PROG_MISMATCH reply to pick one we support.
				 */
				uint32_t minvers, maxvers;
				nfsm_chain_get_32(error, &nmrep, minvers);
				nfsm_chain_get_32(error, &nmrep, maxvers);
				nfsmout_if(error);
				if (nso->nso_protocol == PMAPPROG) {
					/* portmap/rpcbind: PMAPVERS for IPv4, rpcbind v4/v3 for IPv6 */
					if ((minvers > RPCBVERS4) || (maxvers < PMAPVERS)) {
						error = EPROGMISMATCH;
					} else if ((nso->nso_saddr->sa_family == AF_INET) &&
					    (PMAPVERS >= minvers) && (PMAPVERS <= maxvers)) {
						nso->nso_version = PMAPVERS;
					} else if (nso->nso_saddr->sa_family == AF_INET6) {
						if ((RPCBVERS4 >= minvers) && (RPCBVERS4 <= maxvers)) {
							nso->nso_version = RPCBVERS4;
						} else if ((RPCBVERS3 >= minvers) && (RPCBVERS3 <= maxvers)) {
							nso->nso_version = RPCBVERS3;
						}
					}
				} else if (nso->nso_protocol == NFS_PROG) {
					int vers;

					/*
					 * N.B. Both portmapper and rpcbind V3 are happy to return
					 * addresses for other versions than the one you ask (getport or
					 * getaddr) and thus we may have fallen to this code path. So if
					 * we get a version that we support, use highest supported
					 * version. This assumes that the server supports all versions
					 * between minvers and maxvers. Note for IPv6 we will try and
					 * use rpcbind V4 which has getversaddr and we should not get
					 * here if that was successful.
					 */
					for (vers = nso->nso_nfs_max_vers; vers >= (int)nso->nso_nfs_min_vers; vers--) {
						if (vers >= (int)minvers && vers <= (int)maxvers) {
							break;
						}
					}
					nso->nso_version = (vers < (int)nso->nso_nfs_min_vers) ? 0 : vers;
				}
				if (!error && nso->nso_version) {
					/* successfully negotiated a version: treat as success */
					accepted_status = RPC_SUCCESS;
				}
			}
			if (!error) {
				/* map RPC accepted status to an errno */
				switch (accepted_status) {
				case RPC_SUCCESS:
					error = 0;
					break;
				case RPC_PROGUNAVAIL:
					error = EPROGUNAVAIL;
					break;
				case RPC_PROGMISMATCH:
					error = EPROGMISMATCH;
					break;
				case RPC_PROCUNAVAIL:
					error = EPROCUNAVAIL;
					break;
				case RPC_GARBAGE:
					error = EBADRPC;
					break;
				case RPC_SYSTEM_ERR:
				default:
					error = EIO;
					break;
				}
			}
nfsmout:
			/* ping answered (or failed): record the outcome on the socket */
			nso->nso_flags &= ~NSO_PINGING;
			if (error) {
				NFS_SOCK_DBG("nfs upcalled failed for %d program %d vers error = %d\n",
				    nso->nso_protocol, nso->nso_version, error);
				nso->nso_error = error;
				nso->nso_flags |= NSO_DEAD;
			} else {
				nso->nso_flags |= NSO_VERIFIED;
			}
			mbuf_freem(m);
			/* wake up search thread */
			wakeup(nso->nso_wake);
			break;
		}
	}

	nso->nso_flags &= ~NSO_UPCALL;
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/* problems with the socket... */
		NFS_SOCK_DBG("connect upcall failed %d\n", error);
		nso->nso_error = error ? error : EPIPE;
		nso->nso_flags |= NSO_DEAD;
		wakeup(nso->nso_wake);
	}
	if (nso->nso_flags & NSO_DISCONNECTING) {
		/* nfs_socket_destroy() is waiting for this upcall to finish */
		wakeup(&nso->nso_flags);
	}
	lck_mtx_unlock(&nso->nso_lock);
}
530
/*
 * Create/initialize an nfs_socket structure.
 *
 * Allocates an nfs_socket, copies in the server address (setting the
 * requested port for AF_INET/AF_INET6), records the RPC program and
 * version bounds, and creates the underlying socket.  If resvport is
 * set for an IP address, the socket is bound to a reserved (low) port,
 * since some servers require clients to use one.  On success *nsop
 * receives the new socket; on failure *nsop stays NULL and any
 * partially constructed socket is destroyed.  Returns 0 or an errno.
 */
int
nfs_socket_create(
	struct nfsmount *nmp,
	struct sockaddr *sa,
	uint8_t sotype,
	in_port_t port,
	uint32_t protocol,
	uint32_t vers,
	int resvport,
	struct nfs_socket **nsop)
{
	struct nfs_socket *nso;
	struct timeval now;
	int error;
#define NFS_SOCKET_DEBUGGING
#ifdef NFS_SOCKET_DEBUGGING
	/* naddr is sized to hold a local-domain path (sizeof sun_path) */
	char naddr[sizeof((struct sockaddr_un *)0)->sun_path];
	void *sinaddr;

	/* format the address into naddr for the debug messages below */
	switch (sa->sa_family) {
	case AF_INET:
	case AF_INET6:
		if (sa->sa_family == AF_INET) {
			sinaddr = &((struct sockaddr_in*)sa)->sin_addr;
		} else {
			sinaddr = &((struct sockaddr_in6*)sa)->sin6_addr;
		}
		if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
			strlcpy(naddr, "<unknown>", sizeof(naddr));
		}
		break;
	case AF_LOCAL:
		strlcpy(naddr, ((struct sockaddr_un *)sa)->sun_path, sizeof(naddr));
		break;
	default:
		strlcpy(naddr, "<unsupported address family>", sizeof(naddr));
		break;
	}
#else
	char naddr[1] = { 0 };
#endif

	*nsop = NULL;

	/* Create the socket. */
	MALLOC(nso, struct nfs_socket *, sizeof(struct nfs_socket), M_TEMP, M_WAITOK | M_ZERO);
	if (nso) {
		MALLOC(nso->nso_saddr, struct sockaddr *, sa->sa_len, M_SONAME, M_WAITOK | M_ZERO);
	}
	if (!nso || !nso->nso_saddr) {
		/* allocation failed: free whatever we did get */
		if (nso) {
			FREE(nso, M_TEMP);
		}
		return ENOMEM;
	}
	lck_mtx_init(&nso->nso_lock, nfs_request_grp, LCK_ATTR_NULL);
	nso->nso_sotype = sotype;
	if (nso->nso_sotype == SOCK_STREAM) {
		nfs_rpc_record_state_init(&nso->nso_rrs);
	}
	microuptime(&now);
	nso->nso_timestamp = now.tv_sec;
	bcopy(sa, nso->nso_saddr, sa->sa_len);
	/* set the requested port in the copied address (IP families only) */
	switch (sa->sa_family) {
	case AF_INET:
	case AF_INET6:
		if (sa->sa_family == AF_INET) {
			((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
		} else if (sa->sa_family == AF_INET6) {
			((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
		}
		break;
	case AF_LOCAL:
		break;
	}
	nso->nso_protocol = protocol;
	nso->nso_version = vers;
	nso->nso_nfs_min_vers = PVER2MAJOR(nmp->nm_min_vers);
	nso->nso_nfs_max_vers = PVER2MAJOR(nmp->nm_max_vers);

	error = sock_socket(sa->sa_family, nso->nso_sotype, 0, NULL, NULL, &nso->nso_so);

	/* Some servers require that the client port be a reserved port number. */
	if (!error && resvport && ((sa->sa_family == AF_INET) || (sa->sa_family == AF_INET6))) {
		struct sockaddr_storage ss;
		int level = (sa->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
		int optname = (sa->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
		int portrange = IP_PORTRANGE_LOW;

		/* request a low (reserved) local port from the stack */
		error = sock_setsockopt(nso->nso_so, level, optname, &portrange, sizeof(portrange));
		if (!error) { /* bind now to check for failure */
			ss.ss_len = sa->sa_len;
			ss.ss_family = sa->sa_family;
			if (ss.ss_family == AF_INET) {
				((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
				((struct sockaddr_in*)&ss)->sin_port = htons(0);
			} else if (ss.ss_family == AF_INET6) {
				((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
				((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
			} else {
				error = EINVAL;
			}
			if (!error) {
				error = sock_bind(nso->nso_so, (struct sockaddr*)&ss);
			}
		}
	}

	if (error) {
		NFS_SOCK_DBG("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nso, naddr, sotype,
		    resvport ? "r" : "", port, protocol, vers);
		nfs_socket_destroy(nso);
	} else {
		NFS_SOCK_DBG("nfs connect %s created socket %p <%s> type %d%s port %d prot %d %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, naddr,
		    sotype, resvport ? "r" : "", port, protocol, vers);
		*nsop = nso;
	}
	return error;
}
655
/*
 * Destroy an nfs_socket structure.
 *
 * Marks the socket NSO_DISCONNECTING, waits up to ~4 seconds for any
 * in-flight upcall to finish (the upcall wakes &nso->nso_flags when it
 * sees the flag), then shuts down and closes the socket and frees all
 * associated state.  The nfs_socket must not be referenced afterwards.
 */
void
nfs_socket_destroy(struct nfs_socket *nso)
{
	struct timespec ts = { .tv_sec = 4, .tv_nsec = 0 };

	NFS_SOCK_DBG("Destoring socket %p flags = %8.8x error = %d\n", nso, nso->nso_flags, nso->nso_error);
	lck_mtx_lock(&nso->nso_lock);
	nso->nso_flags |= NSO_DISCONNECTING;
	if (nso->nso_flags & NSO_UPCALL) { /* give upcall a chance to complete */
		msleep(&nso->nso_flags, &nso->nso_lock, PZERO - 1, "nfswaitupcall", &ts);
	}
	lck_mtx_unlock(&nso->nso_lock);
	sock_shutdown(nso->nso_so, SHUT_RDWR);
	sock_close(nso->nso_so);
	if (nso->nso_sotype == SOCK_STREAM) {
		/* tear down TCP RPC record-marking state */
		nfs_rpc_record_state_cleanup(&nso->nso_rrs);
	}
	lck_mtx_destroy(&nso->nso_lock, nfs_request_grp);
	if (nso->nso_saddr) {
		FREE(nso->nso_saddr, M_SONAME);
	}
	if (nso->nso_saddr2) {
		FREE(nso->nso_saddr2, M_SONAME);
	}
	NFS_SOCK_DBG("nfs connect - socket %p destroyed\n", nso);
	FREE(nso, M_TEMP);
}
686
687 /*
688 * Set common socket options on an nfs_socket.
689 */
690 void
691 nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso)
692 {
693 /*
694 * Set socket send/receive timeouts
695 * - Receive timeout shouldn't matter because most receives are performed
696 * in the socket upcall non-blocking.
697 * - Send timeout should allow us to react to a blocked socket.
698 * Soft mounts will want to abort sooner.
699 */
700 struct timeval timeo;
701 int on = 1, proto, reserve, error;
702
703 timeo.tv_usec = 0;
704 timeo.tv_sec = (NMFLAG(nmp, SOFT) || nfs_can_squish(nmp)) ? 5 : 60;
705 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
706 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
707 if (nso->nso_sotype == SOCK_STREAM) {
708 /* Assume that SOCK_STREAM always requires a connection */
709 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
710 /* set nodelay for TCP */
711 sock_gettype(nso->nso_so, NULL, NULL, &proto);
712 if (proto == IPPROTO_TCP) {
713 sock_setsockopt(nso->nso_so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
714 }
715 }
716
717 /* set socket buffer sizes for UDP/TCP */
718 reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_wsize * 2);
719 {
720 error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve));
721 }
722
723 if (error) {
724 log(LOG_INFO, "nfs_socket_options: error %d setting SO_SNDBUF to %u\n", error, reserve);
725 }
726
727 reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_rsize * 2);
728 error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve));
729 if (error) {
730 log(LOG_INFO, "nfs_socket_options: error %d setting SO_RCVBUF to %u\n", error, reserve);
731 }
732
733 /* set SO_NOADDRERR to detect network changes ASAP */
734 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
735 /* just playin' it safe with upcalls */
736 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
737 /* socket should be interruptible if the mount is */
738 if (!NMFLAG(nmp, INTR)) {
739 sock_nointerrupt(nso->nso_so, 1);
740 }
741 }
742
743 /*
744 * Release resources held in an nfs_socket_search.
745 */
746 void
747 nfs_socket_search_cleanup(struct nfs_socket_search *nss)
748 {
749 struct nfs_socket *nso, *nsonext;
750
751 TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
752 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
753 nss->nss_sockcnt--;
754 nfs_socket_destroy(nso);
755 }
756 if (nss->nss_sock) {
757 nfs_socket_destroy(nss->nss_sock);
758 nss->nss_sock = NULL;
759 }
760 }
761
762 /*
763 * Prefer returning certain errors over others.
764 * This function returns a ranking of the given error.
765 */
766 int
767 nfs_connect_error_class(int error)
768 {
769 switch (error) {
770 case 0:
771 return 0;
772 case ETIMEDOUT:
773 case EAGAIN:
774 return 1;
775 case EPIPE:
776 case EADDRNOTAVAIL:
777 case ENETDOWN:
778 case ENETUNREACH:
779 case ENETRESET:
780 case ECONNABORTED:
781 case ECONNRESET:
782 case EISCONN:
783 case ENOTCONN:
784 case ESHUTDOWN:
785 case ECONNREFUSED:
786 case EHOSTDOWN:
787 case EHOSTUNREACH:
788 return 2;
789 case ERPCMISMATCH:
790 case EPROCUNAVAIL:
791 case EPROGMISMATCH:
792 case EPROGUNAVAIL:
793 return 3;
794 case EBADRPC:
795 return 4;
796 default:
797 return 5;
798 }
799 }
800
801 /*
802 * Make sure a socket search returns the best error.
803 */
804 void
805 nfs_socket_search_update_error(struct nfs_socket_search *nss, int error)
806 {
807 if (nfs_connect_error_class(error) >= nfs_connect_error_class(nss->nss_error)) {
808 nss->nss_error = error;
809 }
810 }
811
/* nfs_connect_search_new_socket:
 * Given a socket search structure for an nfs mount try to find a new socket from the set of addresses specified
 * by nss.
 *
 * nss_last is set to -1 at initialization to indicate the first time. Its set to -2 if address was found but
 * could not be used or if a socket timed out.
 *
 * Returns EINTR if the mount is being unmounted, the error from
 * nfs_socket_create() if socket creation fails, or 0/the last
 * sock_setupcall() error otherwise.
 */
int
nfs_connect_search_new_socket(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
{
	struct nfs_fs_location *fsl;
	struct nfs_fs_server *fss;
	struct sockaddr_storage ss;
	struct nfs_socket *nso;
	char *addrstr;
	int error = 0;


	NFS_SOCK_DBG("nfs connect %s nss_addrcnt = %d\n",
	    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss->nss_addrcnt);

	/*
	 * while there are addresses and:
	 * we have no sockets or
	 * the last address failed and did not produce a socket (nss_last < 0) or
	 * Its been a while (2 seconds) and we have less than the max number of concurrent sockets to search (4)
	 * then attempt to create a socket with the current address.
	 */
	while (nss->nss_addrcnt > 0 && ((nss->nss_last < 0) || (nss->nss_sockcnt == 0) ||
	    ((nss->nss_sockcnt < 4) && (now->tv_sec >= (nss->nss_last + 2))))) {
		if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
			return EINTR;
		}
		/* Can we convert the address to a sockaddr? */
		fsl = nmp->nm_locations.nl_locations[nss->nss_nextloc.nli_loc];
		fss = fsl->nl_servers[nss->nss_nextloc.nli_serv];
		addrstr = fss->ns_addresses[nss->nss_nextloc.nli_addr];
		NFS_SOCK_DBG("Trying address %s for program %d on port %d\n", addrstr, nss->nss_protocol, nss->nss_port);
		if (*addrstr == '\0') {
			/*
			 * We have an unspecified local domain address. We use the program to translate to
			 * a well known local transport address. We only support PMAPROG and NFS for this.
			 */
			if (nss->nss_protocol == PMAPPROG) {
				addrstr = (nss->nss_sotype == SOCK_DGRAM) ? RPCB_TICLTS_PATH : RPCB_TICOTSORD_PATH;
			} else if (nss->nss_protocol == NFS_PROG) {
				addrstr = nmp->nm_nfs_localport;
				if (!addrstr || *addrstr == '\0') {
					addrstr = (nss->nss_sotype == SOCK_DGRAM) ? NFS_TICLTS_PATH : NFS_TICOTSORD_PATH;
				}
			}
			NFS_SOCK_DBG("Calling prog %d with <%s>\n", nss->nss_protocol, addrstr);
		}
		if (!nfs_uaddr2sockaddr(addrstr, (struct sockaddr*)&ss)) {
			NFS_SOCK_DBG("Could not convert address %s to socket\n", addrstr);
			/* unusable address: advance the cursor and note the failure */
			nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
			nss->nss_addrcnt -= 1;
			nss->nss_last = -2;
			continue;
		}
		/* Check that socket family is acceptable. */
		if (nmp->nm_sofamily && (ss.ss_family != nmp->nm_sofamily)) {
			NFS_SOCK_DBG("Skipping socket family %d, want mount family %d\n", ss.ss_family, nmp->nm_sofamily);
			nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
			nss->nss_addrcnt -= 1;
			nss->nss_last = -2;
			continue;
		}

		/* Create the socket. */
		error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nss->nss_sotype,
		    nss->nss_port, nss->nss_protocol, nss->nss_version,
		    ((nss->nss_protocol == NFS_PROG) && NMFLAG(nmp, RESVPORT)), &nso);
		if (error) {
			return error;
		}

		nso->nso_location = nss->nss_nextloc;
		nso->nso_wake = nss;
		error = sock_setupcall(nso->nso_so, nfs_connect_upcall, nso);
		if (error) {
			/*
			 * Upcall registration failed: mark the socket dead but
			 * still queue it below so the normal search reaping
			 * path cleans it up.
			 */
			NFS_SOCK_DBG("sock_setupcall failed for socket %p setting nfs_connect_upcall error = %d\n", nso, error);
			lck_mtx_lock(&nso->nso_lock);
			nso->nso_error = error;
			nso->nso_flags |= NSO_DEAD;
			lck_mtx_unlock(&nso->nso_lock);
		}

		TAILQ_INSERT_TAIL(&nss->nss_socklist, nso, nso_link);
		nss->nss_sockcnt++;
		nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
		nss->nss_addrcnt -= 1;

		nss->nss_last = now->tv_sec;
	}

	/* out of addresses with nothing pending: start the timeout clock */
	if (nss->nss_addrcnt == 0 && nss->nss_last < 0) {
		nss->nss_last = now->tv_sec;
	}

	return error;
}
914
915 /*
916 * nfs_connect_search_socket_connect: Connect an nfs socket nso for nfsmount nmp.
917 * If successful set the socket options for the socket as require from the mount.
918 *
919 * Assumes: nso->nso_lock is held on entry and return.
920 */
int
nfs_connect_search_socket_connect(struct nfsmount *nmp, struct nfs_socket *nso, int verbose)
{
	/*
	 * Returns 1 once the socket is connected and its options have been set,
	 * 0 when the connect is still pending or the socket has been marked
	 * NSO_DEAD (with the failure recorded in nso_error).
	 */
	int error;

	if ((nso->nso_sotype != SOCK_STREAM) && NMFLAG(nmp, NOCONNECT)) {
		/* no connection needed, just say it's already connected */
		NFS_SOCK_DBG("nfs connect %s UDP socket %p noconnect\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
		nso->nso_flags |= NSO_CONNECTED;
		nfs_socket_options(nmp, nso);
		return 1; /* Socket is connected and setup */
	} else if (!(nso->nso_flags & NSO_CONNECTING)) {
		/* initiate the connection */
		nso->nso_flags |= NSO_CONNECTING;
		/* drop nso_lock across sock_connect(); it may block */
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DBG("nfs connect %s connecting socket %p %s\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso,
		    nso->nso_saddr->sa_family == AF_LOCAL ? ((struct sockaddr_un*)nso->nso_saddr)->sun_path : "");
		error = sock_connect(nso->nso_so, nso->nso_saddr, MSG_DONTWAIT);
		if (error) {
			NFS_SOCK_DBG("nfs connect %s connecting socket %p returned %d\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
		}
		lck_mtx_lock(&nso->nso_lock);
		/* EINPROGRESS is expected from a MSG_DONTWAIT connect; any other error is fatal */
		if (error && (error != EINPROGRESS)) {
			nso->nso_error = error;
			nso->nso_flags |= NSO_DEAD;
			return 0;
		}
	}
	if (nso->nso_flags & NSO_CONNECTING) {
		/* check the connection */
		if (sock_isconnected(nso->nso_so)) {
			NFS_SOCK_DBG("nfs connect %s socket %p is connected\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
			nso->nso_flags &= ~NSO_CONNECTING;
			nso->nso_flags |= NSO_CONNECTED;
			nfs_socket_options(nmp, nso);
			return 1; /* Socket is connected and setup */
		} else {
			/* not connected yet: poll SO_ERROR to catch an asynchronous connect failure */
			int optlen = sizeof(error);
			error = 0;
			sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &error, &optlen);
			if (error) { /* we got an error on the socket */
				NFS_SOCK_DBG("nfs connect %s socket %p connection error %d\n",
				    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
				if (verbose) {
					printf("nfs connect socket error %d for %s\n",
					    error, vfs_statfs(nmp->nm_mountp)->f_mntfromname);
				}
				nso->nso_error = error;
				nso->nso_flags |= NSO_DEAD;
				return 0;
			}
		}
	}

	return 0; /* Waiting to be connected */
}
981
982 /*
983 * nfs_connect_search_ping: Send a null proc on the nso socket.
984 */
int
nfs_connect_search_ping(struct nfsmount *nmp, struct nfs_socket *nso, struct timeval *now)
{
	/* initiate a NULL RPC request */
	uint64_t xid = nso->nso_pingxid;
	mbuf_t m, mreq = NULL;
	struct msghdr msg;
	size_t reqlen, sentlen;
	uint32_t vers = nso->nso_version;
	int error;

	/*
	 * No RPC version settled on yet: ping with the default for the
	 * protocol — the address-family-appropriate portmapper/rpcbind
	 * version, or the mount's maximum NFS version.
	 */
	if (!vers) {
		if (nso->nso_protocol == PMAPPROG) {
			vers = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
		} else if (nso->nso_protocol == NFS_PROG) {
			vers = PVER2MAJOR(nmp->nm_max_vers);
		}
	}
	/* drop nso_lock while building the request; nfsm_rpchead2() may block */
	lck_mtx_unlock(&nso->nso_lock);
	NFS_SOCK_DBG("Pinging socket %p %d %d %d\n", nso, nso->nso_sotype, nso->nso_protocol, vers);
	error = nfsm_rpchead2(nmp, nso->nso_sotype, nso->nso_protocol, vers, 0, RPCAUTH_SYS,
	    vfs_context_ucred(vfs_context_kernel()), NULL, NULL, &xid, &mreq);
	lck_mtx_lock(&nso->nso_lock);
	if (!error) {
		nso->nso_flags |= NSO_PINGING;
		nso->nso_pingxid = R_XID32(xid);
		nso->nso_reqtimestamp = now->tv_sec;
		bzero(&msg, sizeof(msg));
		/* an unconnected datagram socket needs an explicit destination address */
		if ((nso->nso_sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so)) {
			msg.msg_name = nso->nso_saddr;
			msg.msg_namelen = nso->nso_saddr->sa_len;
		}
		/* total up the request length so a short send can be detected below */
		for (reqlen = 0, m = mreq; m; m = mbuf_next(m)) {
			reqlen += mbuf_len(m);
		}
		/* again, don't hold nso_lock across the (possibly blocking) send */
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DUMP_MBUF("Sending ping packet", mreq);
		error = sock_sendmbuf(nso->nso_so, &msg, mreq, 0, &sentlen);
		NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
		lck_mtx_lock(&nso->nso_lock);
		/* treat a partial send as a timeout so the socket gets reaped */
		if (!error && (sentlen != reqlen)) {
			error = ETIMEDOUT;
		}
	}
	if (error) {
		/* ping failed: record the error and let the reaper destroy the socket */
		nso->nso_error = error;
		nso->nso_flags |= NSO_DEAD;
		return 0;
	}

	return 1;
}
1038
1039 /*
1040 * nfs_connect_search_socket_found: Take the found socket of the socket search list and assign it to the searched socket.
1041 * Set the nfs socket protocol and version if needed.
1042 */
1043 void
1044 nfs_connect_search_socket_found(struct nfsmount *nmp, struct nfs_socket_search *nss, struct nfs_socket *nso)
1045 {
1046 NFS_SOCK_DBG("nfs connect %s socket %p verified\n",
1047 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1048 if (!nso->nso_version) {
1049 /* If the version isn't set, the default must have worked. */
1050 if (nso->nso_protocol == PMAPPROG) {
1051 nso->nso_version = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
1052 }
1053 if (nso->nso_protocol == NFS_PROG) {
1054 nso->nso_version = PVER2MAJOR(nmp->nm_max_vers);
1055 }
1056 }
1057 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
1058 nss->nss_sockcnt--;
1059 nss->nss_sock = nso;
1060 }
1061
/*
 * nfs_connect_search_socket_reap: For each socket in the search list mark any timed out socket as dead and remove from
 * the list. Dead sockets are then destroyed.
 */
1066 void
1067 nfs_connect_search_socket_reap(struct nfsmount *nmp __unused, struct nfs_socket_search *nss, struct timeval *now)
1068 {
1069 struct nfs_socket *nso, *nsonext;
1070
1071 TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
1072 lck_mtx_lock(&nso->nso_lock);
1073 if (now->tv_sec >= (nso->nso_timestamp + nss->nss_timeo)) {
1074 /* took too long */
1075 NFS_SOCK_DBG("nfs connect %s socket %p timed out\n",
1076 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1077 nso->nso_error = ETIMEDOUT;
1078 nso->nso_flags |= NSO_DEAD;
1079 }
1080 if (!(nso->nso_flags & NSO_DEAD)) {
1081 lck_mtx_unlock(&nso->nso_lock);
1082 continue;
1083 }
1084 lck_mtx_unlock(&nso->nso_lock);
1085 NFS_SOCK_DBG("nfs connect %s reaping socket %p error = %d flags = %8.8x\n",
1086 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, nso->nso_error, nso->nso_flags);
1087 nfs_socket_search_update_error(nss, nso->nso_error);
1088 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
1089 nss->nss_sockcnt--;
1090 nfs_socket_destroy(nso);
1091 /* If there are more sockets to try, force the starting of another socket */
1092 if (nss->nss_addrcnt > 0) {
1093 nss->nss_last = -2;
1094 }
1095 }
1096 }
1097
1098 /*
1099 * nfs_connect_search_check: Check on the status of search and wait for replies if needed.
1100 */
1101 int
1102 nfs_connect_search_check(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
1103 {
1104 int error;
1105
1106 /* log a warning if connect is taking a while */
1107 if (((now->tv_sec - nss->nss_timestamp) >= 8) && ((nss->nss_flags & (NSS_VERBOSE | NSS_WARNED)) == NSS_VERBOSE)) {
1108 printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1109 nss->nss_flags |= NSS_WARNED;
1110 }
1111 if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
1112 return EINTR;
1113 }
1114 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) {
1115 return error;
1116 }
1117
1118 /* If we were succesfull at sending a ping, wait up to a second for a reply */
1119 if (nss->nss_last >= 0) {
1120 tsleep(nss, PSOCK, "nfs_connect_search_wait", hz);
1121 }
1122
1123 return 0;
1124 }
1125
1126
1127 /*
1128 * Continue the socket search until we have something to report.
1129 */
int
nfs_connect_search_loop(struct nfsmount *nmp, struct nfs_socket_search *nss)
{
	struct nfs_socket *nso;
	struct timeval now;
	int error;
	int verbose = (nss->nss_flags & NSS_VERBOSE);

loop:
	microuptime(&now);
	NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, now.tv_sec);

	/* add a new socket to the socket list if needed and available */
	error = nfs_connect_search_new_socket(nmp, nss, &now);
	if (error) {
		NFS_SOCK_DBG("nfs connect returned %d\n", error);
		return error;
	}

	/* check each active socket on the list and try to push it along */
	TAILQ_FOREACH(nso, &nss->nss_socklist, nso_link) {
		lck_mtx_lock(&nso->nso_lock);

		/* If not connected connect it */
		if (!(nso->nso_flags & NSO_CONNECTED)) {
			if (!nfs_connect_search_socket_connect(nmp, nso, verbose)) {
				/* still connecting (or marked dead) — move on to the next socket */
				lck_mtx_unlock(&nso->nso_lock);
				continue;
			}
		}

		/* If the socket hasn't been verified or in a ping, ping it. We also handle UDP retransmits */
		if (!(nso->nso_flags & (NSO_PINGING | NSO_VERIFIED)) ||
		    ((nso->nso_sotype == SOCK_DGRAM) && (now.tv_sec >= nso->nso_reqtimestamp + 2))) {
			if (!nfs_connect_search_ping(nmp, nso, &now)) {
				/* ping failed; socket was marked dead for the reaper below */
				lck_mtx_unlock(&nso->nso_lock);
				continue;
			}
		}

		/* Has the socket been verified by the up call routine? */
		if (nso->nso_flags & NSO_VERIFIED) {
			/* WOOHOO!! This socket looks good! */
			nfs_connect_search_socket_found(nmp, nss, nso);
			lck_mtx_unlock(&nso->nso_lock);
			break;
		}
		lck_mtx_unlock(&nso->nso_lock);
	}

	/* Check for timed out sockets and mark as dead and then remove all dead sockets. */
	nfs_connect_search_socket_reap(nmp, nss, &now);

	/*
	 * Keep looping if we haven't found a socket yet and we have more
	 * sockets to (continue to) try.
	 */
	error = 0;
	if (!nss->nss_sock && (!TAILQ_EMPTY(&nss->nss_socklist) || nss->nss_addrcnt)) {
		/* wait (briefly) for progress on outstanding sockets, then try again */
		error = nfs_connect_search_check(nmp, nss, &now);
		if (!error) {
			goto loop;
		}
	}

	NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
	return error;
}
1198
1199 /*
1200 * Initialize a new NFS connection.
1201 *
1202 * Search for a location to connect a socket to and initialize the connection.
1203 *
1204 * An NFS mount may have multiple locations/servers/addresses available.
1205 * We attempt to connect to each one asynchronously and will start
1206 * several sockets in parallel if other locations are slow to answer.
1207 * We'll use the first NFS socket we can successfully set up.
1208 *
1209 * The search may involve contacting the portmapper service first.
1210 *
1211 * A mount's initial connection may require negotiating some parameters such
1212 * as socket type and NFS version.
1213 */
1214
1215 int
1216 nfs_connect(struct nfsmount *nmp, int verbose, int timeo)
1217 {
1218 struct nfs_socket_search nss;
1219 struct nfs_socket *nso, *nsonfs;
1220 struct sockaddr_storage ss;
1221 struct sockaddr *saddr, *oldsaddr;
1222 sock_upcall upcall;
1223 #if CONFIG_NFS4
1224 struct timeval now;
1225 #endif
1226 struct timeval start;
1227 int error, savederror, nfsvers;
1228 int tryv4 = 1;
1229 uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM;
1230 fhandle_t *fh = NULL;
1231 char *path = NULL;
1232 in_port_t port = 0;
1233 int addrtotal = 0;
1234
1235 /* paranoia... check that we have at least one address in the locations */
1236 uint32_t loc, serv;
1237 for (loc = 0; loc < nmp->nm_locations.nl_numlocs; loc++) {
1238 for (serv = 0; serv < nmp->nm_locations.nl_locations[loc]->nl_servcount; serv++) {
1239 addrtotal += nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount;
1240 if (nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount == 0) {
1241 NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n",
1242 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1243 nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name);
1244 }
1245 }
1246 }
1247
1248 if (addrtotal == 0) {
1249 NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n",
1250 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1251 return EINVAL;
1252 } else {
1253 NFS_SOCK_DBG("nfs connect %s has %d addresses\n",
1254 vfs_statfs(nmp->nm_mountp)->f_mntfromname, addrtotal);
1255 }
1256
1257 lck_mtx_lock(&nmp->nm_lock);
1258 nmp->nm_sockflags |= NMSOCK_CONNECTING;
1259 nmp->nm_nss = &nss;
1260 lck_mtx_unlock(&nmp->nm_lock);
1261 microuptime(&start);
1262 savederror = error = 0;
1263
1264 tryagain:
1265 /* initialize socket search state */
1266 bzero(&nss, sizeof(nss));
1267 nss.nss_addrcnt = addrtotal;
1268 nss.nss_error = savederror;
1269 TAILQ_INIT(&nss.nss_socklist);
1270 nss.nss_sotype = sotype;
1271 nss.nss_startloc = nmp->nm_locations.nl_current;
1272 nss.nss_timestamp = start.tv_sec;
1273 nss.nss_timeo = timeo;
1274 if (verbose) {
1275 nss.nss_flags |= NSS_VERBOSE;
1276 }
1277
1278 /* First time connecting, we may need to negotiate some things */
1279 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1280 NFS_SOCK_DBG("so_family = %d\n", nmp->nm_sofamily);
1281 NFS_SOCK_DBG("nfs port = %d local: <%s>\n", nmp->nm_nfsport, nmp->nm_nfs_localport ? nmp->nm_nfs_localport : "");
1282 NFS_SOCK_DBG("mount port = %d local: <%s>\n", nmp->nm_mountport, nmp->nm_mount_localport ? nmp->nm_mount_localport : "");
1283 if (!nmp->nm_vers) {
1284 /* No NFS version specified... */
1285 if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
1286 #if CONFIG_NFS4
1287 if (PVER2MAJOR(nmp->nm_max_vers) >= NFS_VER4 && tryv4) {
1288 nss.nss_port = NFS_PORT;
1289 nss.nss_protocol = NFS_PROG;
1290 nss.nss_version = 4;
1291 nss.nss_flags |= NSS_FALLBACK2PMAP;
1292 } else {
1293 #endif
1294 /* ...connect to portmapper first if we (may) need any ports. */
1295 nss.nss_port = PMAPPORT;
1296 nss.nss_protocol = PMAPPROG;
1297 nss.nss_version = 0;
1298 #if CONFIG_NFS4
1299 }
1300 #endif
1301 } else {
1302 /* ...connect to NFS port first. */
1303 nss.nss_port = nmp->nm_nfsport;
1304 nss.nss_protocol = NFS_PROG;
1305 nss.nss_version = 0;
1306 }
1307 #if CONFIG_NFS4
1308 } else if (nmp->nm_vers >= NFS_VER4) {
1309 if (tryv4) {
1310 /* For NFSv4, we use the given (or default) port. */
1311 nss.nss_port = nmp->nm_nfsport ? nmp->nm_nfsport : NFS_PORT;
1312 nss.nss_protocol = NFS_PROG;
1313 nss.nss_version = 4;
1314 /*
1315 * set NSS_FALLBACK2PMAP here to pick up any non standard port
1316 * if no port is specified on the mount;
1317 * Note nm_vers is set so we will only try NFS_VER4.
1318 */
1319 if (!nmp->nm_nfsport) {
1320 nss.nss_flags |= NSS_FALLBACK2PMAP;
1321 }
1322 } else {
1323 nss.nss_port = PMAPPORT;
1324 nss.nss_protocol = PMAPPROG;
1325 nss.nss_version = 0;
1326 }
1327 #endif
1328 } else {
1329 /* For NFSv3/v2... */
1330 if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
1331 /* ...connect to portmapper first if we need any ports. */
1332 nss.nss_port = PMAPPORT;
1333 nss.nss_protocol = PMAPPROG;
1334 nss.nss_version = 0;
1335 } else {
1336 /* ...connect to NFS port first. */
1337 nss.nss_port = nmp->nm_nfsport;
1338 nss.nss_protocol = NFS_PROG;
1339 nss.nss_version = nmp->nm_vers;
1340 }
1341 }
1342 NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n",
1343 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
1344 nss.nss_protocol, nss.nss_version);
1345 } else {
1346 /* we've connected before, just connect to NFS port */
1347 if (!nmp->nm_nfsport) {
1348 /* need to ask portmapper which port that would be */
1349 nss.nss_port = PMAPPORT;
1350 nss.nss_protocol = PMAPPROG;
1351 nss.nss_version = 0;
1352 } else {
1353 nss.nss_port = nmp->nm_nfsport;
1354 nss.nss_protocol = NFS_PROG;
1355 nss.nss_version = nmp->nm_vers;
1356 }
1357 NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n",
1358 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
1359 nss.nss_protocol, nss.nss_version);
1360 }
1361
1362 /* Set next location to first valid location. */
1363 /* If start location is invalid, find next location. */
1364 nss.nss_nextloc = nss.nss_startloc;
1365 if ((nss.nss_nextloc.nli_serv >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servcount) ||
1366 (nss.nss_nextloc.nli_addr >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servers[nss.nss_nextloc.nli_serv]->ns_addrcount)) {
1367 nfs_location_next(&nmp->nm_locations, &nss.nss_nextloc);
1368 if (!nfs_location_index_cmp(&nss.nss_nextloc, &nss.nss_startloc)) {
1369 NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n",
1370 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1371 return ENOENT;
1372 }
1373 }
1374 nss.nss_last = -1;
1375
1376 keepsearching:
1377
1378 error = nfs_connect_search_loop(nmp, &nss);
1379 if (error || !nss.nss_sock) {
1380 /* search failed */
1381 nfs_socket_search_cleanup(&nss);
1382 if (nss.nss_flags & NSS_FALLBACK2PMAP) {
1383 tryv4 = 0;
1384 NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n",
1385 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
1386 goto tryagain;
1387 }
1388
1389 if (!error && (nss.nss_sotype == SOCK_STREAM) && !nmp->nm_sotype && (nmp->nm_vers < NFS_VER4)) {
1390 /* Try using UDP */
1391 sotype = SOCK_DGRAM;
1392 savederror = nss.nss_error;
1393 NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n",
1394 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
1395 goto tryagain;
1396 }
1397 if (!error) {
1398 error = nss.nss_error ? nss.nss_error : ETIMEDOUT;
1399 }
1400 lck_mtx_lock(&nmp->nm_lock);
1401 nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
1402 nmp->nm_nss = NULL;
1403 lck_mtx_unlock(&nmp->nm_lock);
1404 if (nss.nss_flags & NSS_WARNED) {
1405 log(LOG_INFO, "nfs_connect: socket connect aborted for %s\n",
1406 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1407 }
1408 if (fh) {
1409 NFS_ZFREE(nfs_fhandle_zone, fh);
1410 }
1411 if (path) {
1412 NFS_ZFREE(ZV_NAMEI, path);
1413 }
1414 NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
1415 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
1416 return error;
1417 }
1418
1419 /* try to use nss_sock */
1420 nso = nss.nss_sock;
1421 nss.nss_sock = NULL;
1422
1423 /* We may be speaking to portmap first... to determine port(s). */
1424 if (nso->nso_saddr->sa_family == AF_INET) {
1425 port = ntohs(((struct sockaddr_in*)nso->nso_saddr)->sin_port);
1426 } else if (nso->nso_saddr->sa_family == AF_INET6) {
1427 port = ntohs(((struct sockaddr_in6*)nso->nso_saddr)->sin6_port);
1428 } else if (nso->nso_saddr->sa_family == AF_LOCAL) {
1429 if (nso->nso_protocol == PMAPPROG) {
1430 port = PMAPPORT;
1431 }
1432 }
1433
1434 if (port == PMAPPORT) {
1435 /* Use this portmapper port to get the port #s we need. */
1436 NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n",
1437 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1438
1439 /* remove the connect upcall so nfs_portmap_lookup() can use this socket */
1440 sock_setupcall(nso->nso_so, NULL, NULL);
1441
1442 /* Set up socket address and port for NFS socket. */
1443 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1444
1445 /* If NFS version not set, try nm_max_vers down to nm_min_vers */
1446 nfsvers = nmp->nm_vers ? nmp->nm_vers : PVER2MAJOR(nmp->nm_max_vers);
1447 if (!(port = nmp->nm_nfsport)) {
1448 if (ss.ss_family == AF_INET) {
1449 ((struct sockaddr_in*)&ss)->sin_port = htons(0);
1450 } else if (ss.ss_family == AF_INET6) {
1451 ((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
1452 } else if (ss.ss_family == AF_LOCAL) {
1453 if (((struct sockaddr_un*)&ss)->sun_path[0] == '/') {
1454 NFS_SOCK_DBG("Looking up NFS socket over %s\n", ((struct sockaddr_un*)&ss)->sun_path);
1455 }
1456 }
1457 for (; nfsvers >= (int)PVER2MAJOR(nmp->nm_min_vers); nfsvers--) {
1458 if (nmp->nm_vers && nmp->nm_vers != nfsvers) {
1459 continue; /* Wrong version */
1460 }
1461 #if CONFIG_NFS4
1462 if (nfsvers == NFS_VER4 && nso->nso_sotype == SOCK_DGRAM) {
1463 continue; /* NFSv4 does not do UDP */
1464 }
1465 #endif
1466 if (ss.ss_family == AF_LOCAL && nmp->nm_nfs_localport) {
1467 struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
1468 NFS_SOCK_DBG("Using supplied local address %s for NFS_PROG\n", nmp->nm_nfs_localport);
1469 strlcpy(sun->sun_path, nmp->nm_nfs_localport, sizeof(sun->sun_path));
1470 error = 0;
1471 } else {
1472 NFS_SOCK_DBG("Calling Portmap/Rpcbind for NFS_PROG");
1473 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1474 nso->nso_so, NFS_PROG, nfsvers, nso->nso_sotype, timeo);
1475 }
1476 if (!error) {
1477 if (ss.ss_family == AF_INET) {
1478 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1479 } else if (ss.ss_family == AF_INET6) {
1480 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1481 } else if (ss.ss_family == AF_LOCAL) {
1482 port = ((struct sockaddr_un *)&ss)->sun_path[0] ? NFS_PORT : 0;
1483 }
1484 if (!port) {
1485 error = EPROGUNAVAIL;
1486 }
1487 #if CONFIG_NFS4
1488 if (port == NFS_PORT && nfsvers == NFS_VER4 && tryv4 == 0) {
1489 continue; /* We already tried this */
1490 }
1491 #endif
1492 }
1493 if (!error) {
1494 break;
1495 }
1496 }
1497 if (nfsvers < (int)PVER2MAJOR(nmp->nm_min_vers) && error == 0) {
1498 error = EPROGUNAVAIL;
1499 }
1500 if (error) {
1501 nfs_socket_search_update_error(&nss, error);
1502 nfs_socket_destroy(nso);
1503 NFS_SOCK_DBG("Could not lookup NFS socket address for version %d error = %d\n", nfsvers, error);
1504 goto keepsearching;
1505 }
1506 } else if (nmp->nm_nfs_localport) {
1507 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_nfs_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1508 NFS_SOCK_DBG("Using supplied nfs_local_port %s for NFS_PROG\n", nmp->nm_nfs_localport);
1509 }
1510
1511 /* Create NFS protocol socket and add it to the list of sockets. */
1512 /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non standard port */
1513 if (ss.ss_family == AF_LOCAL) {
1514 NFS_SOCK_DBG("Creating NFS socket for %s port = %d\n", ((struct sockaddr_un*)&ss)->sun_path, port);
1515 }
1516 error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nso->nso_sotype, port,
1517 NFS_PROG, nfsvers, NMFLAG(nmp, RESVPORT), &nsonfs);
1518 if (error) {
1519 nfs_socket_search_update_error(&nss, error);
1520 nfs_socket_destroy(nso);
1521 NFS_SOCK_DBG("Could not create NFS socket: %d\n", error);
1522 goto keepsearching;
1523 }
1524 nsonfs->nso_location = nso->nso_location;
1525 nsonfs->nso_wake = &nss;
1526 error = sock_setupcall(nsonfs->nso_so, nfs_connect_upcall, nsonfs);
1527 if (error) {
1528 nfs_socket_search_update_error(&nss, error);
1529 nfs_socket_destroy(nsonfs);
1530 nfs_socket_destroy(nso);
1531 NFS_SOCK_DBG("Could not nfs_connect_upcall: %d", error);
1532 goto keepsearching;
1533 }
1534 TAILQ_INSERT_TAIL(&nss.nss_socklist, nsonfs, nso_link);
1535 nss.nss_sockcnt++;
1536 if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
1537 /* Set up socket address and port for MOUNT socket. */
1538 error = 0;
1539 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1540 port = nmp->nm_mountport;
1541 NFS_SOCK_DBG("mount port = %d\n", port);
1542 if (ss.ss_family == AF_INET) {
1543 ((struct sockaddr_in*)&ss)->sin_port = htons(port);
1544 } else if (ss.ss_family == AF_INET6) {
1545 ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
1546 } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) {
1547 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport);
1548 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1549 }
1550 if (!port) {
1551 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1552 /* If NFS version is unknown, optimistically choose for NFSv3. */
1553 int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
1554 int mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nso->nso_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
1555 NFS_SOCK_DBG("Looking up mount port with socket %p\n", nso->nso_so);
1556 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1557 nso->nso_so, RPCPROG_MNT, mntvers, mntproto == IPPROTO_UDP ? SOCK_DGRAM : SOCK_STREAM, timeo);
1558 }
1559 if (!error) {
1560 if (ss.ss_family == AF_INET) {
1561 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1562 } else if (ss.ss_family == AF_INET6) {
1563 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1564 } else if (ss.ss_family == AF_LOCAL) {
1565 port = (((struct sockaddr_un*)&ss)->sun_path[0] != '\0');
1566 }
1567 if (!port) {
1568 error = EPROGUNAVAIL;
1569 }
1570 }
1571 /* create sockaddr for MOUNT */
1572 if (!error) {
1573 MALLOC(nsonfs->nso_saddr2, struct sockaddr *, ss.ss_len, M_SONAME, M_WAITOK | M_ZERO);
1574 }
1575 if (!error && !nsonfs->nso_saddr2) {
1576 error = ENOMEM;
1577 }
1578 if (!error) {
1579 bcopy(&ss, nsonfs->nso_saddr2, ss.ss_len);
1580 }
1581 if (error) {
1582 NFS_SOCK_DBG("Could not create mount sockaet address %d", error);
1583 lck_mtx_lock(&nsonfs->nso_lock);
1584 nsonfs->nso_error = error;
1585 nsonfs->nso_flags |= NSO_DEAD;
1586 lck_mtx_unlock(&nsonfs->nso_lock);
1587 }
1588 }
1589 NFS_SOCK_DBG("Destroying socket %p so %p\n", nso, nso->nso_so);
1590 nfs_socket_destroy(nso);
1591 goto keepsearching;
1592 }
1593
1594 /* nso is an NFS socket */
1595 NFS_SOCK_DBG("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1596
1597 /* If NFS version wasn't specified, it was determined during the connect. */
1598 nfsvers = nmp->nm_vers ? nmp->nm_vers : (int)nso->nso_version;
1599
1600 /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */
1601 if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
1602 error = 0;
1603 saddr = nso->nso_saddr2;
1604 if (!saddr) {
1605 /* Need sockaddr for MOUNT port */
1606 NFS_SOCK_DBG("Getting mount address mountport = %d, mount_localport = %s\n", nmp->nm_mountport, nmp->nm_mount_localport);
1607 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1608 port = nmp->nm_mountport;
1609 if (ss.ss_family == AF_INET) {
1610 ((struct sockaddr_in*)&ss)->sin_port = htons(port);
1611 } else if (ss.ss_family == AF_INET6) {
1612 ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
1613 } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) {
1614 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport);
1615 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1616 }
1617 if (!port) {
1618 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1619 int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
1620 int so_type = NM_OMFLAG(nmp, MNTUDP) ? SOCK_DGRAM : nso->nso_sotype;
1621 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1622 NULL, RPCPROG_MNT, mntvers, so_type, timeo);
1623 if (ss.ss_family == AF_INET) {
1624 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1625 } else if (ss.ss_family == AF_INET6) {
1626 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1627 }
1628 }
1629 if (!error) {
1630 if (port) {
1631 saddr = (struct sockaddr*)&ss;
1632 } else {
1633 error = EPROGUNAVAIL;
1634 }
1635 }
1636 }
1637 if (!error) {
1638 error = nfs3_check_lockmode(nmp, saddr, nso->nso_sotype, timeo);
1639 if (error) {
1640 nfs_socket_search_update_error(&nss, error);
1641 nfs_socket_destroy(nso);
1642 return error;
1643 }
1644 }
1645 if (saddr) {
1646 fh = zalloc(nfs_fhandle_zone);
1647 }
1648 if (saddr && fh) {
1649 path = zalloc(ZV_NAMEI);
1650 }
1651 if (!saddr || !fh || !path) {
1652 if (!error) {
1653 error = ENOMEM;
1654 }
1655 if (fh) {
1656 NFS_ZFREE(nfs_fhandle_zone, fh);
1657 }
1658 if (path) {
1659 NFS_ZFREE(ZV_NAMEI, path);
1660 }
1661 nfs_socket_search_update_error(&nss, error);
1662 nfs_socket_destroy(nso);
1663 goto keepsearching;
1664 }
1665 nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location, path, MAXPATHLEN, 1);
1666 error = nfs3_mount_rpc(nmp, saddr, nso->nso_sotype, nfsvers,
1667 path, vfs_context_current(), timeo, fh, &nmp->nm_servsec);
1668 NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n",
1669 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
1670 if (!error) {
1671 /* Make sure we can agree on a security flavor. */
1672 int o, s; /* indices into mount option and server security flavor lists */
1673 int found = 0;
1674
1675 if ((nfsvers == NFS_VER3) && !nmp->nm_servsec.count) {
1676 /* Some servers return an empty list to indicate RPCAUTH_SYS? */
1677 nmp->nm_servsec.count = 1;
1678 nmp->nm_servsec.flavors[0] = RPCAUTH_SYS;
1679 }
1680 if (nmp->nm_sec.count) {
1681 /* Choose the first flavor in our list that the server supports. */
1682 if (!nmp->nm_servsec.count) {
1683 /* we don't know what the server supports, just use our first choice */
1684 nmp->nm_auth = nmp->nm_sec.flavors[0];
1685 found = 1;
1686 }
1687 for (o = 0; !found && (o < nmp->nm_sec.count); o++) {
1688 for (s = 0; !found && (s < nmp->nm_servsec.count); s++) {
1689 if (nmp->nm_sec.flavors[o] == nmp->nm_servsec.flavors[s]) {
1690 nmp->nm_auth = nmp->nm_sec.flavors[o];
1691 found = 1;
1692 }
1693 }
1694 }
1695 } else {
1696 /* Choose the first one we support from the server's list. */
1697 if (!nmp->nm_servsec.count) {
1698 nmp->nm_auth = RPCAUTH_SYS;
1699 found = 1;
1700 }
1701 for (s = 0; s < nmp->nm_servsec.count; s++) {
1702 switch (nmp->nm_servsec.flavors[s]) {
1703 case RPCAUTH_SYS:
1704 /* prefer RPCAUTH_SYS to RPCAUTH_NONE */
1705 if (found && (nmp->nm_auth == RPCAUTH_NONE)) {
1706 found = 0;
1707 }
1708 OS_FALLTHROUGH;
1709 case RPCAUTH_NONE:
1710 case RPCAUTH_KRB5:
1711 case RPCAUTH_KRB5I:
1712 case RPCAUTH_KRB5P:
1713 if (!found) {
1714 nmp->nm_auth = nmp->nm_servsec.flavors[s];
1715 found = 1;
1716 }
1717 break;
1718 }
1719 }
1720 }
1721 error = !found ? EAUTH : 0;
1722 }
1723 NFS_ZFREE(ZV_NAMEI, path);
1724 if (error) {
1725 nfs_socket_search_update_error(&nss, error);
1726 NFS_ZFREE(nfs_fhandle_zone, fh);
1727 nfs_socket_destroy(nso);
1728 goto keepsearching;
1729 }
1730 if (nmp->nm_fh) {
1731 NFS_ZFREE(nfs_fhandle_zone, nmp->nm_fh);
1732 }
1733 nmp->nm_fh = fh;
1734 fh = NULL;
1735 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_CALLUMNT);
1736 }
1737
1738 /* put the real upcall in place */
1739 upcall = (nso->nso_sotype == SOCK_STREAM) ? nfs_tcp_rcv : nfs_udp_rcv;
1740 error = sock_setupcall(nso->nso_so, upcall, nmp);
1741 if (error) {
1742 nfs_socket_search_update_error(&nss, error);
1743 nfs_socket_destroy(nso);
1744 goto keepsearching;
1745 }
1746
1747 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1748 /* set mntfromname to this location */
1749 if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) {
1750 nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location,
1751 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1752 sizeof(vfs_statfs(nmp->nm_mountp)->f_mntfromname), 0);
1753 }
1754 /* some negotiated values need to remain unchanged for the life of the mount */
1755 if (!nmp->nm_sotype) {
1756 nmp->nm_sotype = nso->nso_sotype;
1757 }
1758 if (!nmp->nm_vers) {
1759 nmp->nm_vers = nfsvers;
1760 #if CONFIG_NFS4
1761 /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */
1762 if ((nfsvers >= NFS_VER4) && !NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
1763 if (nso->nso_saddr->sa_family == AF_INET) {
1764 port = ((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
1765 } else if (nso->nso_saddr->sa_family == AF_INET6) {
1766 port = ((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
1767 } else {
1768 port = 0;
1769 }
1770 if (port == NFS_PORT) {
1771 nmp->nm_nfsport = NFS_PORT;
1772 }
1773 }
1774 #endif
1775 }
1776 #if CONFIG_NFS4
1777 /* do some version-specific pre-mount set up */
1778 if (nmp->nm_vers >= NFS_VER4) {
1779 microtime(&now);
1780 nmp->nm_mounttime = ((uint64_t)now.tv_sec << 32) | now.tv_usec;
1781 if (!NMFLAG(nmp, NOCALLBACK)) {
1782 nfs4_mount_callback_setup(nmp);
1783 }
1784 }
1785 #endif
1786 }
1787
1788 /* Initialize NFS socket state variables */
1789 lck_mtx_lock(&nmp->nm_lock);
1790 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
1791 nmp->nm_srtt[3] = (NFS_TIMEO << 3);
1792 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
1793 nmp->nm_sdrtt[3] = 0;
1794 if (nso->nso_sotype == SOCK_DGRAM) {
1795 nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */
1796 nmp->nm_sent = 0;
1797 } else if (nso->nso_sotype == SOCK_STREAM) {
1798 nmp->nm_timeouts = 0;
1799 }
1800 nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
1801 nmp->nm_sockflags |= NMSOCK_SETUP;
1802 /* move the socket to the mount structure */
1803 nmp->nm_nso = nso;
1804 oldsaddr = nmp->nm_saddr;
1805 nmp->nm_saddr = nso->nso_saddr;
1806 lck_mtx_unlock(&nmp->nm_lock);
1807 error = nfs_connect_setup(nmp);
1808 lck_mtx_lock(&nmp->nm_lock);
1809 nmp->nm_sockflags &= ~NMSOCK_SETUP;
1810 if (!error) {
1811 nmp->nm_sockflags |= NMSOCK_READY;
1812 wakeup(&nmp->nm_sockflags);
1813 }
1814 if (error) {
1815 NFS_SOCK_DBG("nfs connect %s socket %p setup failed %d\n",
1816 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
1817 nfs_socket_search_update_error(&nss, error);
1818 nmp->nm_saddr = oldsaddr;
1819 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1820 /* undo settings made prior to setup */
1821 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_SOCKET_TYPE)) {
1822 nmp->nm_sotype = 0;
1823 }
1824 #if CONFIG_NFS4
1825 if (nmp->nm_vers >= NFS_VER4) {
1826 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
1827 nmp->nm_nfsport = 0;
1828 }
1829 if (nmp->nm_cbid) {
1830 nfs4_mount_callback_shutdown(nmp);
1831 }
1832 if (IS_VALID_CRED(nmp->nm_mcred)) {
1833 kauth_cred_unref(&nmp->nm_mcred);
1834 }
1835 bzero(&nmp->nm_un, sizeof(nmp->nm_un));
1836 }
1837 #endif
1838 nmp->nm_vers = 0;
1839 }
1840 lck_mtx_unlock(&nmp->nm_lock);
1841 nmp->nm_nso = NULL;
1842 nfs_socket_destroy(nso);
1843 goto keepsearching;
1844 }
1845
1846 /* update current location */
1847 if ((nmp->nm_locations.nl_current.nli_flags & NLI_VALID) &&
1848 (nmp->nm_locations.nl_current.nli_serv != nso->nso_location.nli_serv)) {
1849 /* server has changed, we should initiate failover/recovery */
1850 // XXX
1851 }
1852 nmp->nm_locations.nl_current = nso->nso_location;
1853 nmp->nm_locations.nl_current.nli_flags |= NLI_VALID;
1854
1855 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1856 /* We have now successfully connected... make a note of it. */
1857 nmp->nm_sockflags |= NMSOCK_HASCONNECTED;
1858 }
1859
1860 lck_mtx_unlock(&nmp->nm_lock);
1861 if (oldsaddr) {
1862 FREE(oldsaddr, M_SONAME);
1863 }
1864
1865 if (nss.nss_flags & NSS_WARNED) {
1866 log(LOG_INFO, "nfs_connect: socket connect completed for %s\n",
1867 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1868 }
1869
1870 nmp->nm_nss = NULL;
1871 nfs_socket_search_cleanup(&nss);
1872 if (fh) {
1873 NFS_ZFREE(nfs_fhandle_zone, fh);
1874 }
1875 if (path) {
1876 NFS_ZFREE(ZV_NAMEI, path);
1877 }
1878 NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1879 return 0;
1880 }
1881
1882
/*
 * Set up & confirm that a freshly-connected socket is functional.
 *
 * For NFSv2/3 there is nothing to negotiate, so this is a no-op.
 * For NFSv4: if we already hold a client ID, first try to renew our
 * existing state; if the server reports that state is gone (revoked,
 * expired, stale, etc.) schedule full state recovery.  Then
 * (re)establish the client ID with the server.
 *
 * Returns 0 on success or the error from the NFSv4 handshake.
 */
int
nfs_connect_setup(
#if !CONFIG_NFS4
	__unused
#endif
	struct nfsmount *nmp)
{
	int error = 0;
#if CONFIG_NFS4
	if (nmp->nm_vers >= NFS_VER4) {
		if (nmp->nm_state & NFSSTA_CLIENTID) {
			/* first, try to renew our current state */
			error = nfs4_renew(nmp, R_SETUP);
			if ((error == NFSERR_ADMIN_REVOKED) ||
			    (error == NFSERR_CB_PATH_DOWN) ||
			    (error == NFSERR_EXPIRED) ||
			    (error == NFSERR_LEASE_MOVED) ||
			    (error == NFSERR_STALE_CLIENTID)) {
				/* server says our state is gone: flag recovery */
				lck_mtx_lock(&nmp->nm_lock);
				nfs_need_recover(nmp, error);
				lck_mtx_unlock(&nmp->nm_lock);
			}
		}
		/* (re)register our client ID; result of renew above is superseded */
		error = nfs4_setclientid(nmp);
	}
#endif
	return error;
}
1912
1913 /*
1914 * NFS socket reconnect routine:
1915 * Called when a connection is broken.
1916 * - disconnect the old socket
1917 * - nfs_connect() again
1918 * - set R_MUSTRESEND for all outstanding requests on mount point
1919 * If this fails the mount point is DEAD!
1920 */
1921 int
1922 nfs_reconnect(struct nfsmount *nmp)
1923 {
1924 struct nfsreq *rq;
1925 struct timeval now;
1926 thread_t thd = current_thread();
1927 int error, wentdown = 0, verbose = 1;
1928 time_t lastmsg;
1929 int timeo;
1930
1931 microuptime(&now);
1932 lastmsg = now.tv_sec - (nmp->nm_tprintf_delay - nmp->nm_tprintf_initial_delay);
1933
1934 nfs_disconnect(nmp);
1935
1936
1937 lck_mtx_lock(&nmp->nm_lock);
1938 timeo = nfs_is_squishy(nmp) ? 8 : 30;
1939 lck_mtx_unlock(&nmp->nm_lock);
1940
1941 while ((error = nfs_connect(nmp, verbose, timeo))) {
1942 verbose = 0;
1943 nfs_disconnect(nmp);
1944 if ((error == EINTR) || (error == ERESTART)) {
1945 return EINTR;
1946 }
1947 if (error == EIO) {
1948 return EIO;
1949 }
1950 microuptime(&now);
1951 if ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec) {
1952 lastmsg = now.tv_sec;
1953 nfs_down(nmp, thd, error, NFSSTA_TIMEO, "can not connect", 0);
1954 wentdown = 1;
1955 }
1956 lck_mtx_lock(&nmp->nm_lock);
1957 if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
1958 /* we're not yet completely mounted and */
1959 /* we can't reconnect, so we fail */
1960 lck_mtx_unlock(&nmp->nm_lock);
1961 NFS_SOCK_DBG("Not mounted returning %d\n", error);
1962 return error;
1963 }
1964
1965 if (nfs_mount_check_dead_timeout(nmp)) {
1966 nfs_mount_make_zombie(nmp);
1967 lck_mtx_unlock(&nmp->nm_lock);
1968 return ENXIO;
1969 }
1970
1971 if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
1972 lck_mtx_unlock(&nmp->nm_lock);
1973 return error;
1974 }
1975 lck_mtx_unlock(&nmp->nm_lock);
1976 tsleep(nfs_reconnect, PSOCK, "nfs_reconnect_delay", 2 * hz);
1977 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
1978 return error;
1979 }
1980 }
1981
1982 if (wentdown) {
1983 nfs_up(nmp, thd, NFSSTA_TIMEO, "connected");
1984 }
1985
1986 /*
1987 * Loop through outstanding request list and mark all requests
1988 * as needing a resend. (Though nfs_need_reconnect() probably
1989 * marked them all already.)
1990 */
1991 lck_mtx_lock(nfs_request_mutex);
1992 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
1993 if (rq->r_nmp == nmp) {
1994 lck_mtx_lock(&rq->r_mtx);
1995 if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
1996 rq->r_flags |= R_MUSTRESEND;
1997 rq->r_rtt = -1;
1998 wakeup(rq);
1999 if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
2000 nfs_asyncio_resend(rq);
2001 }
2002 }
2003 lck_mtx_unlock(&rq->r_mtx);
2004 }
2005 }
2006 lck_mtx_unlock(nfs_request_mutex);
2007 return 0;
2008 }
2009
2010 /*
2011 * NFS disconnect. Clean up and unlink.
2012 */
2013 void
2014 nfs_disconnect(struct nfsmount *nmp)
2015 {
2016 struct nfs_socket *nso;
2017
2018 lck_mtx_lock(&nmp->nm_lock);
2019 tryagain:
2020 if (nmp->nm_nso) {
2021 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
2022 if (nmp->nm_state & NFSSTA_SENDING) { /* wait for sending to complete */
2023 nmp->nm_state |= NFSSTA_WANTSND;
2024 msleep(&nmp->nm_state, &nmp->nm_lock, PZERO - 1, "nfswaitsending", &ts);
2025 goto tryagain;
2026 }
2027 if (nmp->nm_sockflags & NMSOCK_POKE) { /* wait for poking to complete */
2028 msleep(&nmp->nm_sockflags, &nmp->nm_lock, PZERO - 1, "nfswaitpoke", &ts);
2029 goto tryagain;
2030 }
2031 nmp->nm_sockflags |= NMSOCK_DISCONNECTING;
2032 nmp->nm_sockflags &= ~NMSOCK_READY;
2033 nso = nmp->nm_nso;
2034 nmp->nm_nso = NULL;
2035 if (nso->nso_saddr == nmp->nm_saddr) {
2036 nso->nso_saddr = NULL;
2037 }
2038 lck_mtx_unlock(&nmp->nm_lock);
2039 nfs_socket_destroy(nso);
2040 lck_mtx_lock(&nmp->nm_lock);
2041 nmp->nm_sockflags &= ~NMSOCK_DISCONNECTING;
2042 lck_mtx_unlock(&nmp->nm_lock);
2043 } else {
2044 lck_mtx_unlock(&nmp->nm_lock);
2045 }
2046 }
2047
2048 /*
2049 * mark an NFS mount as needing a reconnect/resends.
2050 */
2051 void
2052 nfs_need_reconnect(struct nfsmount *nmp)
2053 {
2054 struct nfsreq *rq;
2055
2056 lck_mtx_lock(&nmp->nm_lock);
2057 nmp->nm_sockflags &= ~(NMSOCK_READY | NMSOCK_SETUP);
2058 lck_mtx_unlock(&nmp->nm_lock);
2059
2060 /*
2061 * Loop through outstanding request list and
2062 * mark all requests as needing a resend.
2063 */
2064 lck_mtx_lock(nfs_request_mutex);
2065 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
2066 if (rq->r_nmp == nmp) {
2067 lck_mtx_lock(&rq->r_mtx);
2068 if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
2069 rq->r_flags |= R_MUSTRESEND;
2070 rq->r_rtt = -1;
2071 wakeup(rq);
2072 if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
2073 nfs_asyncio_resend(rq);
2074 }
2075 }
2076 lck_mtx_unlock(&rq->r_mtx);
2077 }
2078 }
2079 lck_mtx_unlock(nfs_request_mutex);
2080 }
2081
2082
2083 /*
2084 * thread to handle miscellaneous async NFS socket work (reconnects/resends)
2085 */
2086 void
2087 nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
2088 {
2089 struct nfsmount *nmp = arg;
2090 struct timespec ts = { .tv_sec = 30, .tv_nsec = 0 };
2091 thread_t thd = current_thread();
2092 struct nfsreq *req;
2093 struct timeval now;
2094 int error, dofinish;
2095 nfsnode_t np;
2096 int do_reconnect_sleep = 0;
2097
2098 lck_mtx_lock(&nmp->nm_lock);
2099 while (!(nmp->nm_sockflags & NMSOCK_READY) ||
2100 !TAILQ_EMPTY(&nmp->nm_resendq) ||
2101 !LIST_EMPTY(&nmp->nm_monlist) ||
2102 nmp->nm_deadto_start ||
2103 (nmp->nm_state & NFSSTA_RECOVER) ||
2104 ((nmp->nm_vers >= NFS_VER4) && !TAILQ_EMPTY(&nmp->nm_dreturnq))) {
2105 if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
2106 break;
2107 }
2108 /* do reconnect, if necessary */
2109 if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
2110 if (nmp->nm_reconnect_start <= 0) {
2111 microuptime(&now);
2112 nmp->nm_reconnect_start = now.tv_sec;
2113 }
2114 lck_mtx_unlock(&nmp->nm_lock);
2115 NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
2116 /*
2117 * XXX We don't want to call reconnect again right away if returned errors
2118 * before that may not have blocked. This has caused spamming null procs
2119 * from machines in the pass.
2120 */
2121 if (do_reconnect_sleep) {
2122 tsleep(nfs_mount_sock_thread, PSOCK, "nfs_reconnect_sock_thread_delay", hz);
2123 }
2124 error = nfs_reconnect(nmp);
2125 if (error) {
2126 int lvl = 7;
2127 if (error == EIO || error == EINTR) {
2128 lvl = (do_reconnect_sleep++ % 600) ? 7 : 0;
2129 }
2130 NFS_DBG(NFS_FAC_SOCK, lvl, "nfs reconnect %s: returned %d\n",
2131 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
2132 } else {
2133 nmp->nm_reconnect_start = 0;
2134 do_reconnect_sleep = 0;
2135 }
2136 lck_mtx_lock(&nmp->nm_lock);
2137 }
2138 if ((nmp->nm_sockflags & NMSOCK_READY) &&
2139 (nmp->nm_state & NFSSTA_RECOVER) &&
2140 !(nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
2141 !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
2142 /* perform state recovery */
2143 lck_mtx_unlock(&nmp->nm_lock);
2144 nfs_recover(nmp);
2145 lck_mtx_lock(&nmp->nm_lock);
2146 }
2147 #if CONFIG_NFS4
2148 /* handle NFSv4 delegation returns */
2149 while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
2150 (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) &&
2151 ((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) {
2152 lck_mtx_unlock(&nmp->nm_lock);
2153 nfs4_delegation_return(np, R_RECOVER, thd, nmp->nm_mcred);
2154 lck_mtx_lock(&nmp->nm_lock);
2155 }
2156 #endif
2157 /* do resends, if necessary/possible */
2158 while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) ||
2159 (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) &&
2160 ((req = TAILQ_FIRST(&nmp->nm_resendq)))) {
2161 if (req->r_resendtime) {
2162 microuptime(&now);
2163 }
2164 while (req && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && req->r_resendtime && (now.tv_sec < req->r_resendtime)) {
2165 req = TAILQ_NEXT(req, r_rchain);
2166 }
2167 if (!req) {
2168 break;
2169 }
2170 /* acquire both locks in the right order: first req->r_mtx and then nmp->nm_lock */
2171 lck_mtx_unlock(&nmp->nm_lock);
2172 lck_mtx_lock(&req->r_mtx);
2173 lck_mtx_lock(&nmp->nm_lock);
2174 if ((req->r_flags & R_RESENDQ) == 0 || (req->r_rchain.tqe_next == NFSREQNOLIST)) {
2175 lck_mtx_unlock(&req->r_mtx);
2176 continue;
2177 }
2178 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
2179 req->r_flags &= ~R_RESENDQ;
2180 req->r_rchain.tqe_next = NFSREQNOLIST;
2181 lck_mtx_unlock(&nmp->nm_lock);
2182 /* Note that we have a reference on the request that was taken nfs_asyncio_resend */
2183 if (req->r_error || req->r_nmrep.nmc_mhead) {
2184 dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
2185 wakeup(req);
2186 lck_mtx_unlock(&req->r_mtx);
2187 if (dofinish) {
2188 nfs_asyncio_finish(req);
2189 }
2190 nfs_request_rele(req);
2191 lck_mtx_lock(&nmp->nm_lock);
2192 continue;
2193 }
2194 if ((req->r_flags & R_RESTART) || nfs_request_using_gss(req)) {
2195 req->r_flags &= ~R_RESTART;
2196 req->r_resendtime = 0;
2197 lck_mtx_unlock(&req->r_mtx);
2198 /* async RPCs on GSS mounts need to be rebuilt and resent. */
2199 nfs_reqdequeue(req);
2200 #if CONFIG_NFS_GSS
2201 if (nfs_request_using_gss(req)) {
2202 nfs_gss_clnt_rpcdone(req);
2203 error = nfs_gss_clnt_args_restore(req);
2204 if (error == ENEEDAUTH) {
2205 req->r_xid = 0;
2206 }
2207 }
2208 #endif /* CONFIG_NFS_GSS */
2209 NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
2210 nfs_request_using_gss(req) ? " gss" : "", req->r_procnum, req->r_xid,
2211 req->r_flags, req->r_rtt);
2212 error = nfs_sigintr(nmp, req, req->r_thread, 0);
2213 if (!error) {
2214 error = nfs_request_add_header(req);
2215 }
2216 if (!error) {
2217 error = nfs_request_send(req, 0);
2218 }
2219 lck_mtx_lock(&req->r_mtx);
2220 if (error) {
2221 req->r_error = error;
2222 }
2223 wakeup(req);
2224 dofinish = error && req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
2225 lck_mtx_unlock(&req->r_mtx);
2226 if (dofinish) {
2227 nfs_asyncio_finish(req);
2228 }
2229 nfs_request_rele(req);
2230 lck_mtx_lock(&nmp->nm_lock);
2231 error = 0;
2232 continue;
2233 }
2234 NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
2235 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
2236 error = nfs_sigintr(nmp, req, req->r_thread, 0);
2237 if (!error) {
2238 req->r_flags |= R_SENDING;
2239 lck_mtx_unlock(&req->r_mtx);
2240 error = nfs_send(req, 0);
2241 lck_mtx_lock(&req->r_mtx);
2242 if (!error) {
2243 wakeup(req);
2244 lck_mtx_unlock(&req->r_mtx);
2245 nfs_request_rele(req);
2246 lck_mtx_lock(&nmp->nm_lock);
2247 continue;
2248 }
2249 }
2250 req->r_error = error;
2251 wakeup(req);
2252 dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
2253 lck_mtx_unlock(&req->r_mtx);
2254 if (dofinish) {
2255 nfs_asyncio_finish(req);
2256 }
2257 nfs_request_rele(req);
2258 lck_mtx_lock(&nmp->nm_lock);
2259 }
2260 if (nfs_mount_check_dead_timeout(nmp)) {
2261 nfs_mount_make_zombie(nmp);
2262 break;
2263 }
2264
2265 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
2266 break;
2267 }
2268 /* check monitored nodes, if necessary/possible */
2269 if (!LIST_EMPTY(&nmp->nm_monlist)) {
2270 nmp->nm_state |= NFSSTA_MONITOR_SCAN;
2271 LIST_FOREACH(np, &nmp->nm_monlist, n_monlink) {
2272 if (!(nmp->nm_sockflags & NMSOCK_READY) ||
2273 (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
2274 break;
2275 }
2276 np->n_mflag |= NMMONSCANINPROG;
2277 lck_mtx_unlock(&nmp->nm_lock);
2278 error = nfs_getattr(np, NULL, vfs_context_kernel(), (NGA_UNCACHED | NGA_MONITOR));
2279 if (!error && ISSET(np->n_flag, NUPDATESIZE)) { /* update quickly to avoid multiple events */
2280 nfs_data_update_size(np, 0);
2281 }
2282 lck_mtx_lock(&nmp->nm_lock);
2283 np->n_mflag &= ~NMMONSCANINPROG;
2284 if (np->n_mflag & NMMONSCANWANT) {
2285 np->n_mflag &= ~NMMONSCANWANT;
2286 wakeup(&np->n_mflag);
2287 }
2288 if (error || !(nmp->nm_sockflags & NMSOCK_READY) ||
2289 (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
2290 break;
2291 }
2292 }
2293 nmp->nm_state &= ~NFSSTA_MONITOR_SCAN;
2294 if (nmp->nm_state & NFSSTA_UNMOUNTING) {
2295 wakeup(&nmp->nm_state); /* let unmounting thread know scan is done */
2296 }
2297 }
2298 if ((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING))) {
2299 if (nmp->nm_deadto_start || !TAILQ_EMPTY(&nmp->nm_resendq) ||
2300 (nmp->nm_state & NFSSTA_RECOVER)) {
2301 ts.tv_sec = 1;
2302 } else {
2303 ts.tv_sec = 5;
2304 }
2305 msleep(&nmp->nm_sockthd, &nmp->nm_lock, PSOCK, "nfssockthread", &ts);
2306 }
2307 }
2308
2309 /* If we're unmounting, send the unmount RPC, if requested/appropriate. */
2310 if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
2311 (nmp->nm_state & NFSSTA_MOUNTED) && NMFLAG(nmp, CALLUMNT) &&
2312 (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
2313 lck_mtx_unlock(&nmp->nm_lock);
2314 nfs3_umount_rpc(nmp, vfs_context_kernel(),
2315 (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2);
2316 lck_mtx_lock(&nmp->nm_lock);
2317 }
2318
2319 if (nmp->nm_sockthd == thd) {
2320 nmp->nm_sockthd = NULL;
2321 }
2322 lck_mtx_unlock(&nmp->nm_lock);
2323 wakeup(&nmp->nm_sockthd);
2324 thread_terminate(thd);
2325 }
2326
2327 /* start or wake a mount's socket thread */
2328 void
2329 nfs_mount_sock_thread_wake(struct nfsmount *nmp)
2330 {
2331 if (nmp->nm_sockthd) {
2332 wakeup(&nmp->nm_sockthd);
2333 } else if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS) {
2334 thread_deallocate(nmp->nm_sockthd);
2335 }
2336 }
2337
2338 /*
2339 * Check if we should mark the mount dead because the
2340 * unresponsive mount has reached the dead timeout.
2341 * (must be called with nmp locked)
2342 */
2343 int
2344 nfs_mount_check_dead_timeout(struct nfsmount *nmp)
2345 {
2346 struct timeval now;
2347
2348 if (nmp->nm_state & NFSSTA_DEAD) {
2349 return 1;
2350 }
2351 if (nmp->nm_deadto_start == 0) {
2352 return 0;
2353 }
2354 nfs_is_squishy(nmp);
2355 if (nmp->nm_curdeadtimeout <= 0) {
2356 return 0;
2357 }
2358 microuptime(&now);
2359 if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout) {
2360 return 0;
2361 }
2362 return 1;
2363 }
2364
2365 /*
2366 * Call nfs_mount_zombie to remove most of the
2367 * nfs state for the mount, and then ask to be forcibly unmounted.
2368 *
2369 * Assumes the nfs mount structure lock nm_lock is held.
2370 */
2371
2372 void
2373 nfs_mount_make_zombie(struct nfsmount *nmp)
2374 {
2375 fsid_t fsid;
2376
2377 if (!nmp) {
2378 return;
2379 }
2380
2381 if (nmp->nm_state & NFSSTA_DEAD) {
2382 return;
2383 }
2384
2385 printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
2386 (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
2387 fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
2388 lck_mtx_unlock(&nmp->nm_lock);
2389 nfs_mount_zombie(nmp, NFSSTA_DEAD);
2390 vfs_event_signal(&fsid, VQ_DEAD, 0);
2391 lck_mtx_lock(&nmp->nm_lock);
2392 }
2393
2394
2395 /*
2396 * NFS callback channel socket state
2397 */
2398 struct nfs_callback_socket {
2399 TAILQ_ENTRY(nfs_callback_socket) ncbs_link;
2400 socket_t ncbs_so; /* the socket */
2401 struct sockaddr_storage ncbs_saddr; /* socket address */
2402 struct nfs_rpc_record_state ncbs_rrs; /* RPC record parsing state */
2403 time_t ncbs_stamp; /* last accessed at */
2404 uint32_t ncbs_flags; /* see below */
2405 };
2406 #define NCBSOCK_UPCALL 0x0001
2407 #define NCBSOCK_UPCALLWANT 0x0002
2408 #define NCBSOCK_DEAD 0x0004
2409
#if CONFIG_NFS4
/*
 * NFS callback channel state
 *
 * One listening socket for accepting socket connections from servers and
 * a list of connected sockets to handle callback requests on.
 * Mounts registered with the callback channel are assigned IDs and
 * put on a list so that the callback request handling code can match
 * the requests up with mounts.
 *
 * All of this state is protected by nfs_global_mutex.
 */
socket_t nfs4_cb_so = NULL;             /* IPv4 listening socket */
socket_t nfs4_cb_so6 = NULL;            /* IPv6 listening socket */
in_port_t nfs4_cb_port = 0;             /* bound IPv4 callback port (host order) */
in_port_t nfs4_cb_port6 = 0;            /* bound IPv6 callback port (host order) */
uint32_t nfs4_cb_id = 0;                /* next callback ID to assign */
uint32_t nfs4_cb_so_usecount = 0;       /* number of mounts using the callback sockets */
TAILQ_HEAD(nfs4_cb_sock_list, nfs_callback_socket) nfs4_cb_socks;
TAILQ_HEAD(nfs4_cb_mount_list, nfsmount) nfs4_cb_mounts;

int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
2430
2431 /*
2432 * Set up the callback channel for the NFS mount.
2433 *
2434 * Initializes the callback channel socket state and
2435 * assigns a callback ID to the mount.
2436 */
2437 void
2438 nfs4_mount_callback_setup(struct nfsmount *nmp)
2439 {
2440 struct sockaddr_in sin;
2441 struct sockaddr_in6 sin6;
2442 socket_t so = NULL;
2443 socket_t so6 = NULL;
2444 struct timeval timeo;
2445 int error, on = 1;
2446 in_port_t port;
2447
2448 lck_mtx_lock(nfs_global_mutex);
2449 if (nfs4_cb_id == 0) {
2450 TAILQ_INIT(&nfs4_cb_mounts);
2451 TAILQ_INIT(&nfs4_cb_socks);
2452 nfs4_cb_id++;
2453 }
2454 nmp->nm_cbid = nfs4_cb_id++;
2455 if (nmp->nm_cbid == 0) {
2456 nmp->nm_cbid = nfs4_cb_id++;
2457 }
2458 nfs4_cb_so_usecount++;
2459 TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);
2460
2461 if (nfs4_cb_so) {
2462 lck_mtx_unlock(nfs_global_mutex);
2463 return;
2464 }
2465
2466 /* IPv4 */
2467 error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
2468 if (error) {
2469 log(LOG_INFO, "nfs callback setup: error %d creating listening IPv4 socket\n", error);
2470 goto fail;
2471 }
2472 so = nfs4_cb_so;
2473
2474 if (NFS_PORT_INVALID(nfs_callback_port)) {
2475 error = EINVAL;
2476 log(LOG_INFO, "nfs callback setup: error %d nfs_callback_port %d is not valid\n", error, nfs_callback_port);
2477 goto fail;
2478 }
2479
2480 sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
2481 sin.sin_len = sizeof(struct sockaddr_in);
2482 sin.sin_family = AF_INET;
2483 sin.sin_addr.s_addr = htonl(INADDR_ANY);
2484 sin.sin_port = htons((in_port_t)nfs_callback_port); /* try to use specified port */
2485 error = sock_bind(so, (struct sockaddr *)&sin);
2486 if (error) {
2487 log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
2488 goto fail;
2489 }
2490 error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
2491 if (error) {
2492 log(LOG_INFO, "nfs callback setup: error %d getting listening IPv4 socket port\n", error);
2493 goto fail;
2494 }
2495 nfs4_cb_port = ntohs(sin.sin_port);
2496
2497 error = sock_listen(so, 32);
2498 if (error) {
2499 log(LOG_INFO, "nfs callback setup: error %d on IPv4 listen\n", error);
2500 goto fail;
2501 }
2502
2503 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2504 timeo.tv_usec = 0;
2505 timeo.tv_sec = 60;
2506 error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
2507 if (error) {
2508 log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error);
2509 }
2510 error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
2511 if (error) {
2512 log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error);
2513 }
2514 sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2515 sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2516 sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2517 error = 0;
2518
2519 /* IPv6 */
2520 error = sock_socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so6);
2521 if (error) {
2522 log(LOG_INFO, "nfs callback setup: error %d creating listening IPv6 socket\n", error);
2523 goto fail;
2524 }
2525 so6 = nfs4_cb_so6;
2526
2527 sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
2528 sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
2529 /* try to use specified port or same port as IPv4 */
2530 port = nfs_callback_port ? (in_port_t)nfs_callback_port : nfs4_cb_port;
2531 ipv6_bind_again:
2532 sin6.sin6_len = sizeof(struct sockaddr_in6);
2533 sin6.sin6_family = AF_INET6;
2534 sin6.sin6_addr = in6addr_any;
2535 sin6.sin6_port = htons(port);
2536 error = sock_bind(so6, (struct sockaddr *)&sin6);
2537 if (error) {
2538 if (port != nfs_callback_port) {
2539 /* if we simply tried to match the IPv4 port, then try any port */
2540 port = 0;
2541 goto ipv6_bind_again;
2542 }
2543 log(LOG_INFO, "nfs callback setup: error %d binding listening IPv6 socket\n", error);
2544 goto fail;
2545 }
2546 error = sock_getsockname(so6, (struct sockaddr *)&sin6, sin6.sin6_len);
2547 if (error) {
2548 log(LOG_INFO, "nfs callback setup: error %d getting listening IPv6 socket port\n", error);
2549 goto fail;
2550 }
2551 nfs4_cb_port6 = ntohs(sin6.sin6_port);
2552
2553 error = sock_listen(so6, 32);
2554 if (error) {
2555 log(LOG_INFO, "nfs callback setup: error %d on IPv6 listen\n", error);
2556 goto fail;
2557 }
2558
2559 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2560 timeo.tv_usec = 0;
2561 timeo.tv_sec = 60;
2562 error = sock_setsockopt(so6, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
2563 if (error) {
2564 log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error);
2565 }
2566 error = sock_setsockopt(so6, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
2567 if (error) {
2568 log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error);
2569 }
2570 sock_setsockopt(so6, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2571 sock_setsockopt(so6, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2572 sock_setsockopt(so6, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2573 error = 0;
2574
2575 fail:
2576 if (error) {
2577 nfs4_cb_so = nfs4_cb_so6 = NULL;
2578 lck_mtx_unlock(nfs_global_mutex);
2579 if (so) {
2580 sock_shutdown(so, SHUT_RDWR);
2581 sock_close(so);
2582 }
2583 if (so6) {
2584 sock_shutdown(so6, SHUT_RDWR);
2585 sock_close(so6);
2586 }
2587 } else {
2588 lck_mtx_unlock(nfs_global_mutex);
2589 }
2590 }
2591
2592 /*
2593 * Shut down the callback channel for the NFS mount.
2594 *
2595 * Clears the mount's callback ID and releases the mounts
2596 * reference on the callback socket. Last reference dropped
2597 * will also shut down the callback socket(s).
2598 */
2599 void
2600 nfs4_mount_callback_shutdown(struct nfsmount *nmp)
2601 {
2602 struct nfs_callback_socket *ncbsp;
2603 socket_t so, so6;
2604 struct nfs4_cb_sock_list cb_socks;
2605 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
2606
2607 lck_mtx_lock(nfs_global_mutex);
2608 if (nmp->nm_cbid == 0) {
2609 lck_mtx_unlock(nfs_global_mutex);
2610 return;
2611 }
2612 TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
2613 /* wait for any callbacks in progress to complete */
2614 while (nmp->nm_cbrefs) {
2615 msleep(&nmp->nm_cbrefs, nfs_global_mutex, PSOCK, "cbshutwait", &ts);
2616 }
2617 nmp->nm_cbid = 0;
2618 if (--nfs4_cb_so_usecount) {
2619 lck_mtx_unlock(nfs_global_mutex);
2620 return;
2621 }
2622 so = nfs4_cb_so;
2623 so6 = nfs4_cb_so6;
2624 nfs4_cb_so = nfs4_cb_so6 = NULL;
2625 TAILQ_INIT(&cb_socks);
2626 TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
2627 lck_mtx_unlock(nfs_global_mutex);
2628 if (so) {
2629 sock_shutdown(so, SHUT_RDWR);
2630 sock_close(so);
2631 }
2632 if (so6) {
2633 sock_shutdown(so6, SHUT_RDWR);
2634 sock_close(so6);
2635 }
2636 while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
2637 TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
2638 sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
2639 sock_close(ncbsp->ncbs_so);
2640 nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
2641 FREE(ncbsp, M_TEMP);
2642 }
2643 }
2644
2645 /*
2646 * Check periodically for stale/unused nfs callback sockets
2647 */
2648 #define NFS4_CB_TIMER_PERIOD 30
2649 #define NFS4_CB_IDLE_MAX 300
2650 void
2651 nfs4_callback_timer(__unused void *param0, __unused void *param1)
2652 {
2653 struct nfs_callback_socket *ncbsp, *nextncbsp;
2654 struct timeval now;
2655
2656 loop:
2657 lck_mtx_lock(nfs_global_mutex);
2658 if (TAILQ_EMPTY(&nfs4_cb_socks)) {
2659 nfs4_callback_timer_on = 0;
2660 lck_mtx_unlock(nfs_global_mutex);
2661 return;
2662 }
2663 microuptime(&now);
2664 TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
2665 if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
2666 (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX))) {
2667 continue;
2668 }
2669 TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
2670 lck_mtx_unlock(nfs_global_mutex);
2671 sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
2672 sock_close(ncbsp->ncbs_so);
2673 nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
2674 FREE(ncbsp, M_TEMP);
2675 goto loop;
2676 }
2677 nfs4_callback_timer_on = 1;
2678 nfs_interval_timer_start(nfs4_callback_timer_call,
2679 NFS4_CB_TIMER_PERIOD * 1000);
2680 lck_mtx_unlock(nfs_global_mutex);
2681 }
2682
2683 /*
2684 * Accept a new callback socket.
2685 */
2686 void
2687 nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
2688 {
2689 socket_t newso = NULL;
2690 struct nfs_callback_socket *ncbsp;
2691 struct nfsmount *nmp;
2692 struct timeval timeo, now;
2693 int error, on = 1, ip;
2694
2695 if (so == nfs4_cb_so) {
2696 ip = 4;
2697 } else if (so == nfs4_cb_so6) {
2698 ip = 6;
2699 } else {
2700 return;
2701 }
2702
2703 /* allocate/initialize a new nfs_callback_socket */
2704 MALLOC(ncbsp, struct nfs_callback_socket *, sizeof(struct nfs_callback_socket), M_TEMP, M_WAITOK);
2705 if (!ncbsp) {
2706 log(LOG_ERR, "nfs callback accept: no memory for new socket\n");
2707 return;
2708 }
2709 bzero(ncbsp, sizeof(*ncbsp));
2710 ncbsp->ncbs_saddr.ss_len = (ip == 4) ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
2711 nfs_rpc_record_state_init(&ncbsp->ncbs_rrs);
2712
2713 /* accept a new socket */
2714 error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_saddr,
2715 ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT,
2716 nfs4_cb_rcv, ncbsp, &newso);
2717 if (error) {
2718 log(LOG_INFO, "nfs callback accept: error %d accepting IPv%d socket\n", error, ip);
2719 FREE(ncbsp, M_TEMP);
2720 return;
2721 }
2722
2723 /* set up the new socket */
2724 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2725 timeo.tv_usec = 0;
2726 timeo.tv_sec = 60;
2727 error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
2728 if (error) {
2729 log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error, ip);
2730 }
2731 error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
2732 if (error) {
2733 log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error, ip);
2734 }
2735 sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2736 sock_setsockopt(newso, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
2737 sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2738 sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2739
2740 ncbsp->ncbs_so = newso;
2741 microuptime(&now);
2742 ncbsp->ncbs_stamp = now.tv_sec;
2743
2744 lck_mtx_lock(nfs_global_mutex);
2745
2746 /* add it to the list */
2747 TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);
2748
2749 /* verify it's from a host we have mounted */
2750 TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
2751 /* check if socket's source address matches this mount's server address */
2752 if (!nmp->nm_saddr) {
2753 continue;
2754 }
2755 if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
2756 break;
2757 }
2758 }
2759 if (!nmp) { /* we don't want this socket, mark it dead */
2760 ncbsp->ncbs_flags |= NCBSOCK_DEAD;
2761 }
2762
2763 /* make sure the callback socket cleanup timer is running */
2764 /* (shorten the timer if we've got a socket we don't want) */
2765 if (!nfs4_callback_timer_on) {
2766 nfs4_callback_timer_on = 1;
2767 nfs_interval_timer_start(nfs4_callback_timer_call,
2768 !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
2769 } else if (!nmp && (nfs4_callback_timer_on < 2)) {
2770 nfs4_callback_timer_on = 2;
2771 thread_call_cancel(nfs4_callback_timer_call);
2772 nfs_interval_timer_start(nfs4_callback_timer_call, 500);
2773 }
2774
2775 lck_mtx_unlock(nfs_global_mutex);
2776 }
2777
2778 /*
2779 * Receive mbufs from callback sockets into RPC records and process each record.
2780 * Detect connection has been closed and shut down.
2781 */
2782 void
2783 nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
2784 {
2785 struct nfs_callback_socket *ncbsp = arg;
2786 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
2787 struct timeval now;
2788 mbuf_t m;
2789 int error = 0, recv = 1;
2790
2791 lck_mtx_lock(nfs_global_mutex);
2792 while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
2793 /* wait if upcall is already in progress */
2794 ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
2795 msleep(ncbsp, nfs_global_mutex, PSOCK, "cbupcall", &ts);
2796 }
2797 ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
2798 lck_mtx_unlock(nfs_global_mutex);
2799
2800 /* loop while we make error-free progress */
2801 while (!error && recv) {
2802 error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, MSG_DONTWAIT, &recv, &m);
2803 if (m) { /* handle the request */
2804 error = nfs4_cb_handler(ncbsp, m);
2805 }
2806 }
2807
2808 /* note: no error and no data indicates server closed its end */
2809 if ((error != EWOULDBLOCK) && (error || !recv)) {
2810 /*
2811 * Socket is either being closed or should be.
2812 * We can't close the socket in the context of the upcall.
2813 * So we mark it as dead and leave it for the cleanup timer to reap.
2814 */
2815 ncbsp->ncbs_stamp = 0;
2816 ncbsp->ncbs_flags |= NCBSOCK_DEAD;
2817 } else {
2818 microuptime(&now);
2819 ncbsp->ncbs_stamp = now.tv_sec;
2820 }
2821
2822 lck_mtx_lock(nfs_global_mutex);
2823 ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
2824 lck_mtx_unlock(nfs_global_mutex);
2825 wakeup(ncbsp);
2826 }
2827
2828 /*
2829 * Handle an NFS callback channel request.
2830 */
2831 int
2832 nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
2833 {
2834 socket_t so = ncbsp->ncbs_so;
2835 struct nfsm_chain nmreq, nmrep;
2836 mbuf_t mhead = NULL, mrest = NULL, m;
2837 struct msghdr msg;
2838 struct nfsmount *nmp;
2839 fhandle_t *fh;
2840 nfsnode_t np;
2841 nfs_stateid stateid;
2842 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
2843 uint32_t val, xid, procnum, taglen, cbid, numops, op, status;
2844 uint32_t auth_type, auth_len;
2845 uint32_t numres, *pnumres;
2846 int error = 0, replen, len;
2847 size_t sentlen = 0;
2848
2849 xid = numops = op = status = procnum = taglen = cbid = 0;
2850 fh = zalloc(nfs_fhandle_zone);
2851
2852 nfsm_chain_dissect_init(error, &nmreq, mreq);
2853 nfsm_chain_get_32(error, &nmreq, xid); // RPC XID
2854 nfsm_chain_get_32(error, &nmreq, val); // RPC Call
2855 nfsm_assert(error, (val == RPC_CALL), EBADRPC);
2856 nfsm_chain_get_32(error, &nmreq, val); // RPC Version
2857 nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH);
2858 nfsm_chain_get_32(error, &nmreq, val); // RPC Program Number
2859 nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL);
2860 nfsm_chain_get_32(error, &nmreq, val); // NFS Callback Program Version Number
2861 nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH);
2862 nfsm_chain_get_32(error, &nmreq, procnum); // NFS Callback Procedure Number
2863 nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL);
2864
2865 /* Handle authentication */
2866 /* XXX just ignore auth for now - handling kerberos may be tricky */
2867 nfsm_chain_get_32(error, &nmreq, auth_type); // RPC Auth Flavor
2868 nfsm_chain_get_32(error, &nmreq, auth_len); // RPC Auth Length
2869 nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
2870 if (!error && (auth_len > 0)) {
2871 nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
2872 }
2873 nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
2874 nfsm_chain_get_32(error, &nmreq, auth_len); // verifier length
2875 nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
2876 if (!error && (auth_len > 0)) {
2877 nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
2878 }
2879 if (error) {
2880 status = error;
2881 error = 0;
2882 goto nfsmout;
2883 }
2884
2885 switch (procnum) {
2886 case NFSPROC4_CB_NULL:
2887 status = NFSERR_RETVOID;
2888 break;
2889 case NFSPROC4_CB_COMPOUND:
2890 /* tag, minorversion, cb ident, numops, op array */
2891 nfsm_chain_get_32(error, &nmreq, taglen); /* tag length */
2892 nfsm_assert(error, (val <= NFS4_OPAQUE_LIMIT), EBADRPC);
2893
2894 /* start building the body of the response */
2895 nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5 * NFSX_UNSIGNED);
2896 nfsm_chain_init(&nmrep, mrest);
2897
2898 /* copy tag from request to response */
2899 nfsm_chain_add_32(error, &nmrep, taglen); /* tag length */
2900 for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) {
2901 nfsm_chain_get_32(error, &nmreq, val);
2902 nfsm_chain_add_32(error, &nmrep, val);
2903 }
2904
2905 /* insert number of results placeholder */
2906 numres = 0;
2907 nfsm_chain_add_32(error, &nmrep, numres);
2908 pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED);
2909
2910 nfsm_chain_get_32(error, &nmreq, val); /* minorversion */
2911 nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH);
2912 nfsm_chain_get_32(error, &nmreq, cbid); /* callback ID */
2913 nfsm_chain_get_32(error, &nmreq, numops); /* number of operations */
2914 if (error) {
2915 if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH)) {
2916 status = error;
2917 } else if ((error == ENOBUFS) || (error == ENOMEM)) {
2918 status = NFSERR_RESOURCE;
2919 } else {
2920 status = NFSERR_SERVERFAULT;
2921 }
2922 error = 0;
2923 nfsm_chain_null(&nmrep);
2924 goto nfsmout;
2925 }
2926 /* match the callback ID to a registered mount */
2927 lck_mtx_lock(nfs_global_mutex);
2928 TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
2929 if (nmp->nm_cbid != cbid) {
2930 continue;
2931 }
2932 /* verify socket's source address matches this mount's server address */
2933 if (!nmp->nm_saddr) {
2934 continue;
2935 }
2936 if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
2937 break;
2938 }
2939 }
2940 /* mark the NFS mount as busy */
2941 if (nmp) {
2942 nmp->nm_cbrefs++;
2943 }
2944 lck_mtx_unlock(nfs_global_mutex);
2945 if (!nmp) {
2946 /* if no mount match, just drop socket. */
2947 error = EPERM;
2948 nfsm_chain_null(&nmrep);
2949 goto out;
2950 }
2951
2952 /* process ops, adding results to mrest */
2953 while (numops > 0) {
2954 numops--;
2955 nfsm_chain_get_32(error, &nmreq, op);
2956 if (error) {
2957 break;
2958 }
2959 switch (op) {
2960 case NFS_OP_CB_GETATTR:
2961 // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
2962 np = NULL;
2963 nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
2964 bmlen = NFS_ATTR_BITMAP_LEN;
2965 nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
2966 if (error) {
2967 status = error;
2968 error = 0;
2969 numops = 0; /* don't process any more ops */
2970 } else {
2971 /* find the node for the file handle */
2972 error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
2973 if (error || !np) {
2974 status = NFSERR_BADHANDLE;
2975 error = 0;
2976 np = NULL;
2977 numops = 0; /* don't process any more ops */
2978 }
2979 }
2980 nfsm_chain_add_32(error, &nmrep, op);
2981 nfsm_chain_add_32(error, &nmrep, status);
2982 if (!error && (status == EBADRPC)) {
2983 error = status;
2984 }
2985 if (np) {
2986 /* only allow returning size, change, and mtime attrs */
2987 NFS_CLEAR_ATTRIBUTES(&rbitmap);
2988 attrbytes = 0;
2989 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
2990 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_CHANGE);
2991 attrbytes += 2 * NFSX_UNSIGNED;
2992 }
2993 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
2994 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_SIZE);
2995 attrbytes += 2 * NFSX_UNSIGNED;
2996 }
2997 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
2998 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_TIME_MODIFY);
2999 attrbytes += 3 * NFSX_UNSIGNED;
3000 }
3001 nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN);
3002 nfsm_chain_add_32(error, &nmrep, attrbytes);
3003 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
3004 nfsm_chain_add_64(error, &nmrep,
3005 np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0));
3006 }
3007 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
3008 nfsm_chain_add_64(error, &nmrep, np->n_size);
3009 }
3010 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
3011 nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]);
3012 nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]);
3013 }
3014 nfs_node_unlock(np);
3015 vnode_put(NFSTOV(np));
3016 np = NULL;
3017 }
3018 /*
3019 * If we hit an error building the reply, we can't easily back up.
3020 * So we'll just update the status and hope the server ignores the
3021 * extra garbage.
3022 */
3023 break;
3024 case NFS_OP_CB_RECALL:
3025 // (STATEID, TRUNCATE, FH) -> (STATUS)
3026 np = NULL;
3027 nfsm_chain_get_stateid(error, &nmreq, &stateid);
3028 nfsm_chain_get_32(error, &nmreq, truncate);
3029 nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
3030 if (error) {
3031 status = error;
3032 error = 0;
3033 numops = 0; /* don't process any more ops */
3034 } else {
3035 /* find the node for the file handle */
3036 error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
3037 if (error || !np) {
3038 status = NFSERR_BADHANDLE;
3039 error = 0;
3040 np = NULL;
3041 numops = 0; /* don't process any more ops */
3042 } else if (!(np->n_openflags & N_DELEG_MASK) ||
3043 bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) {
3044 /* delegation stateid state doesn't match */
3045 status = NFSERR_BAD_STATEID;
3046 numops = 0; /* don't process any more ops */
3047 }
3048 if (!status) { /* add node to recall queue, and wake socket thread */
3049 nfs4_delegation_return_enqueue(np);
3050 }
3051 if (np) {
3052 nfs_node_unlock(np);
3053 vnode_put(NFSTOV(np));
3054 }
3055 }
3056 nfsm_chain_add_32(error, &nmrep, op);
3057 nfsm_chain_add_32(error, &nmrep, status);
3058 if (!error && (status == EBADRPC)) {
3059 error = status;
3060 }
3061 break;
3062 case NFS_OP_CB_ILLEGAL:
3063 default:
3064 nfsm_chain_add_32(error, &nmrep, NFS_OP_CB_ILLEGAL);
3065 status = NFSERR_OP_ILLEGAL;
3066 nfsm_chain_add_32(error, &nmrep, status);
3067 numops = 0; /* don't process any more ops */
3068 break;
3069 }
3070 numres++;
3071 }
3072
3073 if (!status && error) {
3074 if (error == EBADRPC) {
3075 status = error;
3076 } else if ((error == ENOBUFS) || (error == ENOMEM)) {
3077 status = NFSERR_RESOURCE;
3078 } else {
3079 status = NFSERR_SERVERFAULT;
3080 }
3081 error = 0;
3082 }
3083
3084 /* Now, set the numres field */
3085 *pnumres = txdr_unsigned(numres);
3086 nfsm_chain_build_done(error, &nmrep);
3087 nfsm_chain_null(&nmrep);
3088
3089 /* drop the callback reference on the mount */
3090 lck_mtx_lock(nfs_global_mutex);
3091 nmp->nm_cbrefs--;
3092 if (!nmp->nm_cbid) {
3093 wakeup(&nmp->nm_cbrefs);
3094 }
3095 lck_mtx_unlock(nfs_global_mutex);
3096 break;
3097 }
3098
3099 nfsmout:
3100 if (status == EBADRPC) {
3101 OSAddAtomic64(1, &nfsstats.rpcinvalid);
3102 }
3103
3104 /* build reply header */
3105 error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mhead);
3106 nfsm_chain_init(&nmrep, mhead);
3107 nfsm_chain_add_32(error, &nmrep, 0); /* insert space for an RPC record mark */
3108 nfsm_chain_add_32(error, &nmrep, xid);
3109 nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
3110 if ((status == ERPCMISMATCH) || (status & NFSERR_AUTHERR)) {
3111 nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
3112 if (status & NFSERR_AUTHERR) {
3113 nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
3114 nfsm_chain_add_32(error, &nmrep, (status & ~NFSERR_AUTHERR));
3115 } else {
3116 nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
3117 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
3118 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
3119 }
3120 } else {
3121 /* reply status */
3122 nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
3123 /* XXX RPCAUTH_NULL verifier */
3124 nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
3125 nfsm_chain_add_32(error, &nmrep, 0);
3126 /* accepted status */
3127 switch (status) {
3128 case EPROGUNAVAIL:
3129 nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
3130 break;
3131 case EPROGMISMATCH:
3132 nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
3133 nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
3134 nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
3135 break;
3136 case EPROCUNAVAIL:
3137 nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
3138 break;
3139 case EBADRPC:
3140 nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
3141 break;
3142 default:
3143 nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
3144 if (status != NFSERR_RETVOID) {
3145 nfsm_chain_add_32(error, &nmrep, status);
3146 }
3147 break;
3148 }
3149 }
3150 nfsm_chain_build_done(error, &nmrep);
3151 if (error) {
3152 nfsm_chain_null(&nmrep);
3153 goto out;
3154 }
3155 error = mbuf_setnext(nmrep.nmc_mcur, mrest);
3156 if (error) {
3157 printf("nfs cb: mbuf_setnext failed %d\n", error);
3158 goto out;
3159 }
3160 mrest = NULL;
3161 /* Calculate the size of the reply */
3162 replen = 0;
3163 for (m = nmrep.nmc_mhead; m; m = mbuf_next(m)) {
3164 replen += mbuf_len(m);
3165 }
3166 mbuf_pkthdr_setlen(mhead, replen);
3167 error = mbuf_pkthdr_setrcvif(mhead, NULL);
3168 nfsm_chain_set_recmark(error, &nmrep, (replen - NFSX_UNSIGNED) | 0x80000000);
3169 nfsm_chain_null(&nmrep);
3170
3171 /* send the reply */
3172 bzero(&msg, sizeof(msg));
3173 error = sock_sendmbuf(so, &msg, mhead, 0, &sentlen);
3174 mhead = NULL;
3175 if (!error && ((int)sentlen != replen)) {
3176 error = EWOULDBLOCK;
3177 }
3178 if (error == EWOULDBLOCK) { /* inability to send response is considered fatal */
3179 error = ETIMEDOUT;
3180 }
3181 out:
3182 if (error) {
3183 nfsm_chain_cleanup(&nmrep);
3184 }
3185 if (mhead) {
3186 mbuf_freem(mhead);
3187 }
3188 if (mrest) {
3189 mbuf_freem(mrest);
3190 }
3191 if (mreq) {
3192 mbuf_freem(mreq);
3193 }
3194 NFS_ZFREE(nfs_fhandle_zone, fh);
3195 return error;
3196 }
3197 #endif /* CONFIG_NFS4 */
3198
3199 /*
3200 * Initialize an nfs_rpc_record_state structure.
3201 */
3202 void
3203 nfs_rpc_record_state_init(struct nfs_rpc_record_state *nrrsp)
3204 {
3205 bzero(nrrsp, sizeof(*nrrsp));
3206 nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
3207 }
3208
3209 /*
3210 * Clean up an nfs_rpc_record_state structure.
3211 */
3212 void
3213 nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state *nrrsp)
3214 {
3215 if (nrrsp->nrrs_m) {
3216 mbuf_freem(nrrsp->nrrs_m);
3217 nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
3218 }
3219 }
3220
3221 /*
3222 * Read the next (marked) RPC record from the socket.
3223 *
3224 * *recvp returns if any data was received.
3225 * *mp returns the next complete RPC record
3226 */
int
nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, int *recvp, mbuf_t *mp)
{
	struct iovec aio;
	struct msghdr msg;
	size_t rcvlen;
	int error = 0;
	mbuf_t m;

	*recvp = 0;
	*mp = NULL;

	/* read the TCP RPC record marker */
	/* (the loop resumes a partially-read marker across calls via nrrs_markerleft) */
	while (!error && nrrsp->nrrs_markerleft) {
		/* point at the unread tail of the 4-byte marker buffer */
		aio.iov_base = ((char*)&nrrsp->nrrs_fragleft +
		    sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft);
		aio.iov_len = nrrsp->nrrs_markerleft;
		bzero(&msg, sizeof(msg));
		msg.msg_iov = &aio;
		msg.msg_iovlen = 1;
		error = sock_receive(so, &msg, flags, &rcvlen);
		if (error || !rcvlen) {
			break;
		}
		*recvp = 1;
		nrrsp->nrrs_markerleft -= rcvlen;
		if (nrrsp->nrrs_markerleft) {
			/* marker still incomplete; try to read the rest */
			continue;
		}
		/* record marker complete */
		nrrsp->nrrs_fragleft = ntohl(nrrsp->nrrs_fragleft);
		/* high bit of the marker flags the final fragment of the record */
		if (nrrsp->nrrs_fragleft & 0x80000000) {
			nrrsp->nrrs_lastfrag = 1;
			nrrsp->nrrs_fragleft &= ~0x80000000;
		}
		nrrsp->nrrs_reclen += nrrsp->nrrs_fragleft;
		if (nrrsp->nrrs_reclen > NFS_MAXPACKET) {
			/* This is SERIOUS! We are out of sync with the sender. */
			log(LOG_ERR, "impossible RPC record length (%d) on callback", nrrsp->nrrs_reclen);
			error = EFBIG;
		}
	}

	/* read the TCP RPC record fragment */
	while (!error && !nrrsp->nrrs_markerleft && nrrsp->nrrs_fragleft) {
		m = NULL;
		rcvlen = nrrsp->nrrs_fragleft;
		error = sock_receivembuf(so, NULL, &m, flags, &rcvlen);
		if (error || !rcvlen || !m) {
			break;
		}
		*recvp = 1;
		/* append mbufs to list */
		nrrsp->nrrs_fragleft -= rcvlen;
		if (!nrrsp->nrrs_m) {
			nrrsp->nrrs_m = m;
		} else {
			error = mbuf_setnext(nrrsp->nrrs_mlast, m);
			if (error) {
				printf("nfs tcp rcv: mbuf_setnext failed %d\n", error);
				mbuf_freem(m);
				break;
			}
		}
		/* advance to the tail of the newly-appended chain */
		while (mbuf_next(m)) {
			m = mbuf_next(m);
		}
		nrrsp->nrrs_mlast = m;
	}

	/* done reading fragment? */
	if (!error && !nrrsp->nrrs_markerleft && !nrrsp->nrrs_fragleft) {
		/* reset socket fragment parsing state */
		nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
		if (nrrsp->nrrs_lastfrag) {
			/* RPC record complete */
			*mp = nrrsp->nrrs_m;
			/* reset socket record parsing state */
			nrrsp->nrrs_reclen = 0;
			nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
			nrrsp->nrrs_lastfrag = 0;
		}
	}

	return error;
}
3313
3314
3315
3316 /*
3317 * The NFS client send routine.
3318 *
3319 * Send the given NFS request out the mount's socket.
3320 * Holds nfs_sndlock() for the duration of this call.
3321 *
3322 * - check for request termination (sigintr)
3323 * - wait for reconnect, if necessary
3324 * - UDP: check the congestion window
3325 * - make a copy of the request to send
3326 * - UDP: update the congestion window
3327 * - send the request
3328 *
3329 * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
3330 * rexmit count is also updated if this isn't the first send.
3331 *
3332 * If the send is not successful, make sure R_MUSTRESEND is set.
3333 * If this wasn't the first transmit, set R_RESENDERR.
3334 * Also, undo any UDP congestion window changes made.
3335 *
3336 * If the error appears to indicate that the socket should
3337 * be reconnected, mark the socket for reconnection.
3338 *
3339 * Only return errors when the request should be aborted.
3340 */
int
nfs_send(struct nfsreq *req, int wait)
{
	struct nfsmount *nmp;
	struct nfs_socket *nso;
	int error, error2, sotype, rexmit, slpflag = 0, needrecon;
	struct msghdr msg;
	struct sockaddr *sendnam;
	mbuf_t mreqcopy;
	size_t sentlen = 0;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

again:
	/* serialize senders on this mount's socket */
	error = nfs_sndlock(req);
	if (error) {
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}

	/* bail out early if the request has been interrupted/terminated */
	error = nfs_sigintr(req->r_nmp, req, NULL, 0);
	if (error) {
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}
	nmp = req->r_nmp;
	sotype = nmp->nm_sotype;

	/*
	 * If it's a setup RPC but we're not in SETUP... must need reconnect.
	 * If it's a recovery RPC but the socket's not ready... must need reconnect.
	 */
	if (((req->r_flags & R_SETUP) && !(nmp->nm_sockflags & NMSOCK_SETUP)) ||
	    ((req->r_flags & R_RECOVER) && !(nmp->nm_sockflags & NMSOCK_READY))) {
		error = ETIMEDOUT;
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}

	/* If the socket needs reconnection, do that now. */
	/* wait until socket is ready - unless this request is part of setup */
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_sockflags & NMSOCK_READY) &&
	    !((nmp->nm_sockflags & NMSOCK_SETUP) && (req->r_flags & R_SETUP))) {
		if (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) {
			slpflag |= PCATCH;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		nfs_sndunlock(req);
		if (!wait) {
			/* non-blocking caller: mark for resend and return */
			lck_mtx_lock(&req->r_mtx);
			req->r_flags &= ~R_SENDING;
			req->r_flags |= R_MUSTRESEND;
			req->r_rtt = 0;
			lck_mtx_unlock(&req->r_mtx);
			return 0;
		}
		NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req->r_xid);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		lck_mtx_lock(&nmp->nm_lock);
		while (!(nmp->nm_sockflags & NMSOCK_READY)) {
			/* don't bother waiting if the socket thread won't be reconnecting it */
			if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
				error = EIO;
				break;
			}
			if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (nmp->nm_reconnect_start > 0)) {
				struct timeval now;
				microuptime(&now);
				if ((now.tv_sec - nmp->nm_reconnect_start) >= 8) {
					/* soft mount in reconnect for a while... terminate ASAP */
					OSAddAtomic64(1, &nfsstats.rpctimeouts);
					req->r_flags |= R_SOFTTERM;
					req->r_error = error = ETIMEDOUT;
					break;
				}
			}
			/* make sure socket thread is running, then wait */
			nfs_mount_sock_thread_wake(nmp);
			if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
				break;
			}
			msleep(req, &nmp->nm_lock, slpflag | PSOCK, "nfsconnectwait", &ts);
			slpflag = 0;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		if (error) {
			lck_mtx_lock(&req->r_mtx);
			req->r_error = error;
			req->r_flags &= ~R_SENDING;
			lck_mtx_unlock(&req->r_mtx);
			return error;
		}
		/* socket is (hopefully) ready now; retry from the top */
		goto again;
	}
	nso = nmp->nm_nso;
	/* note that we're using the mount's socket to do the send */
	nmp->nm_state |= NFSSTA_SENDING; /* will be cleared by nfs_sndunlock() */
	lck_mtx_unlock(&nmp->nm_lock);
	if (!nso) {
		/* no socket; mark for resend and let the socket thread sort it out */
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_SENDING;
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	lck_mtx_lock(&req->r_mtx);
	rexmit = (req->r_flags & R_SENT);

	if (sotype == SOCK_DGRAM) {
		lck_mtx_lock(&nmp->nm_lock);
		if (!(req->r_flags & R_CWND) && (nmp->nm_sent >= nmp->nm_cwnd)) {
			/* if we can't send this out yet, wait on the cwnd queue */
			slpflag = (NMFLAG(nmp, INTR) && req->r_thread) ? PCATCH : 0;
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_sndunlock(req);
			req->r_flags &= ~R_SENDING;
			req->r_flags |= R_MUSTRESEND;
			lck_mtx_unlock(&req->r_mtx);
			if (!wait) {
				req->r_rtt = 0;
				return 0;
			}
			/* block until congestion window opens up, then retry */
			lck_mtx_lock(&nmp->nm_lock);
			while (nmp->nm_sent >= nmp->nm_cwnd) {
				if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
					break;
				}
				TAILQ_INSERT_TAIL(&nmp->nm_cwndq, req, r_cchain);
				msleep(req, &nmp->nm_lock, slpflag | (PZERO - 1), "nfswaitcwnd", &ts);
				slpflag = 0;
				if ((req->r_cchain.tqe_next != NFSREQNOLIST)) {
					TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
					req->r_cchain.tqe_next = NFSREQNOLIST;
				}
			}
			lck_mtx_unlock(&nmp->nm_lock);
			goto again;
		}
		/*
		 * We update these *before* the send to avoid racing
		 * against others who may be looking to send requests.
		 */
		if (!rexmit) {
			/* first transmit */
			req->r_flags |= R_CWND;
			nmp->nm_sent += NFS_CWNDSCALE;
		} else {
			/*
			 * When retransmitting, turn timing off
			 * and divide congestion window by 2.
			 */
			req->r_flags &= ~R_TIMING;
			nmp->nm_cwnd >>= 1;
			if (nmp->nm_cwnd < NFS_CWNDSCALE) {
				nmp->nm_cwnd = NFS_CWNDSCALE;
			}
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

	req->r_flags &= ~R_MUSTRESEND;
	lck_mtx_unlock(&req->r_mtx);

	/* send a copy so the original request mbufs are preserved for retransmit */
	error = mbuf_copym(req->r_mhead, 0, MBUF_COPYALL,
	    wait ? MBUF_WAITOK : MBUF_DONTWAIT, &mreqcopy);
	if (error) {
		if (wait) {
			log(LOG_INFO, "nfs_send: mbuf copy failed %d\n", error);
		}
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_SENDING;
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	bzero(&msg, sizeof(msg));
	/* unconnected datagram sockets need an explicit destination address */
	if ((sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so) && ((sendnam = nmp->nm_saddr))) {
		msg.msg_name = (caddr_t)sendnam;
		msg.msg_namelen = sendnam->sa_len;
	}
	NFS_SOCK_DUMP_MBUF("Sending mbuf\n", mreqcopy);
	error = sock_sendmbuf(nso->nso_so, &msg, mreqcopy, 0, &sentlen);
	if (error || (sentlen != req->r_mreqlen)) {
		NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
		    req->r_xid, (int)sentlen, (int)req->r_mreqlen, error);
	}

	if (!error && (sentlen != req->r_mreqlen)) {
		error = EWOULDBLOCK;
	}
	/* a partial send on a stream socket leaves it out of sync - must reconnect */
	needrecon = ((sotype == SOCK_STREAM) && sentlen && (sentlen != req->r_mreqlen));

	lck_mtx_lock(&req->r_mtx);
	req->r_flags &= ~R_SENDING;
	req->r_rtt = 0;
	if (rexmit && (++req->r_rexmit > NFS_MAXREXMIT)) {
		req->r_rexmit = NFS_MAXREXMIT;
	}

	if (!error) {
		/* SUCCESS */
		req->r_flags &= ~R_RESENDERR;
		if (rexmit) {
			OSAddAtomic64(1, &nfsstats.rpcretries);
		}
		req->r_flags |= R_SENT;
		if (req->r_flags & R_WAITSENT) {
			req->r_flags &= ~R_WAITSENT;
			wakeup(req);
		}
		nfs_sndunlock(req);
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	/* send failed */
	req->r_flags |= R_MUSTRESEND;
	if (rexmit) {
		req->r_flags |= R_RESENDERR;
	}
	if ((error == EINTR) || (error == ERESTART)) {
		req->r_error = error;
	}
	lck_mtx_unlock(&req->r_mtx);

	if (sotype == SOCK_DGRAM) {
		/*
		 * Note: even though a first send may fail, we consider
		 * the request sent for congestion window purposes.
		 * So we don't need to undo any of the changes made above.
		 */
		/*
		 * Socket errors ignored for connectionless sockets??
		 * For now, ignore them all
		 */
		if ((error != EINTR) && (error != ERESTART) &&
		    (error != EWOULDBLOCK) && (error != EIO) && (nso == nmp->nm_nso)) {
			/* clear the pending socket error so later sends aren't poisoned */
			int clearerror = 0, optlen = sizeof(clearerror);
			sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
#ifdef NFS_SOCKET_DEBUGGING
			if (clearerror) {
				NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
				    error, clearerror);
			}
#endif
		}
	}

	/* check if it appears we should reconnect the socket */
	switch (error) {
	case EWOULDBLOCK:
		/* if send timed out, reconnect if on TCP */
		if (sotype != SOCK_STREAM) {
			break;
		}
		OS_FALLTHROUGH;
	case EPIPE:
	case EADDRNOTAVAIL:
	case ENETDOWN:
	case ENETUNREACH:
	case ENETRESET:
	case ECONNABORTED:
	case ECONNRESET:
	case ENOTCONN:
	case ESHUTDOWN:
	case ECONNREFUSED:
	case EHOSTDOWN:
	case EHOSTUNREACH:
		/* case ECANCELED??? */
		needrecon = 1;
		break;
	}
	if (needrecon && (nso == nmp->nm_nso)) { /* mark socket as needing reconnect */
		NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req->r_xid, error);
		nfs_need_reconnect(nmp);
	}

	nfs_sndunlock(req);

	if (nfs_is_dead(error, nmp)) {
		error = EIO;
	}

	/*
	 * Don't log some errors:
	 * EPIPE errors may be common with servers that drop idle connections.
	 * EADDRNOTAVAIL may occur on network transitions.
	 * ENOTCONN may occur under some network conditions.
	 */
	if ((error == EPIPE) || (error == EADDRNOTAVAIL) || (error == ENOTCONN)) {
		error = 0;
	}
	if (error && (error != EINTR) && (error != ERESTART)) {
		log(LOG_INFO, "nfs send error %d for server %s\n", error,
		    !req->r_nmp ? "<unmounted>" :
		    vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname);
	}

	/* prefer request termination error over other errors */
	error2 = nfs_sigintr(req->r_nmp, req, req->r_thread, 0);
	if (error2) {
		error = error2;
	}

	/* only allow the following errors to be returned */
	if ((error != EINTR) && (error != ERESTART) && (error != EIO) &&
	    (error != ENXIO) && (error != ETIMEDOUT)) {
		/*
		 * We got some error we don't know what to do with,
		 * i.e., we're not reconnecting, we map it to
		 * EIO. Presumably our send failed and we better tell
		 * the caller so they don't wait for a reply that is
		 * never going to come. If we are reconnecting we
		 * return 0 and the request will be resent.
		 */
		error = needrecon ? 0 : EIO;
	}
	return error;
}
3680
3681 /*
3682 * NFS client socket upcalls
3683 *
3684 * Pull RPC replies out of an NFS mount's socket and match them
3685 * up with the pending request.
3686 *
3687 * The datagram code is simple because we always get whole
3688 * messages out of the socket.
3689 *
3690 * The stream code is more involved because we have to parse
3691 * the RPC records out of the stream.
3692 */
3693
3694 /* NFS client UDP socket upcall */
3695 void
3696 nfs_udp_rcv(socket_t so, void *arg, __unused int waitflag)
3697 {
3698 struct nfsmount *nmp = arg;
3699 struct nfs_socket *nso = nmp->nm_nso;
3700 size_t rcvlen;
3701 mbuf_t m;
3702 int error = 0;
3703
3704 if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
3705 return;
3706 }
3707
3708 do {
3709 /* make sure we're on the current socket */
3710 if (!nso || (nso->nso_so != so)) {
3711 return;
3712 }
3713
3714 m = NULL;
3715 rcvlen = 1000000;
3716 error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
3717 if (m) {
3718 nfs_request_match_reply(nmp, m);
3719 }
3720 } while (m && !error);
3721
3722 if (error && (error != EWOULDBLOCK)) {
3723 /* problems with the socket... mark for reconnection */
3724 NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error);
3725 nfs_need_reconnect(nmp);
3726 }
3727 }
3728
3729 /* NFS client TCP socket upcall */
void
nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfsmount *nmp = arg;
	struct nfs_socket *nso = nmp->nm_nso;
	struct nfs_rpc_record_state nrrs;
	mbuf_t m;
	int error = 0;
	int recv = 1;
	int wup = 0;

	/* ignore upcalls while a (re)connect is in progress */
	if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
		return;
	}

	/* make sure we're on the current socket */
	lck_mtx_lock(&nmp->nm_lock);
	nso = nmp->nm_nso;
	if (!nso || (nso->nso_so != so) || (nmp->nm_sockflags & (NMSOCK_DISCONNECTING))) {
		lck_mtx_unlock(&nmp->nm_lock);
		return;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* make sure this upcall should be trying to do work */
	lck_mtx_lock(&nso->nso_lock);
	if (nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) {
		lck_mtx_unlock(&nso->nso_lock);
		return;
	}
	nso->nso_flags |= NSO_UPCALL;
	/* work on a local copy of the record-parsing state while unlocked */
	nrrs = nso->nso_rrs;
	lck_mtx_unlock(&nso->nso_lock);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &nrrs, MSG_DONTWAIT, &recv, &m);
		if (m) { /* match completed response with request */
			nfs_request_match_reply(nmp, m);
		}
	}

	/* Update the sockets's rpc parsing state */
	lck_mtx_lock(&nso->nso_lock);
	nso->nso_rrs = nrrs;
	if (nso->nso_flags & NSO_DISCONNECTING) {
		/* a disconnect is waiting for this upcall to finish */
		wup = 1;
	}
	nso->nso_flags &= ~NSO_UPCALL;
	lck_mtx_unlock(&nso->nso_lock);
	if (wup) {
		wakeup(&nso->nso_flags);
	}

#ifdef NFS_SOCKET_DEBUGGING
	if (!recv && (error != EWOULDBLOCK)) {
		NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error);
	}
#endif
	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/* problems with the socket... mark for reconnection */
		NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error);
		nfs_need_reconnect(nmp);
	}
}
3796
3797 /*
3798 * "poke" a socket to try to provoke any pending errors
3799 */
3800 void
3801 nfs_sock_poke(struct nfsmount *nmp)
3802 {
3803 struct iovec aio;
3804 struct msghdr msg;
3805 size_t len;
3806 int error = 0;
3807 int dummy;
3808
3809 lck_mtx_lock(&nmp->nm_lock);
3810 if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) ||
3811 !(nmp->nm_sockflags & NMSOCK_READY) || !nmp->nm_nso || !nmp->nm_nso->nso_so) {
3812 /* Nothing to poke */
3813 nmp->nm_sockflags &= ~NMSOCK_POKE;
3814 wakeup(&nmp->nm_sockflags);
3815 lck_mtx_unlock(&nmp->nm_lock);
3816 return;
3817 }
3818 lck_mtx_unlock(&nmp->nm_lock);
3819 aio.iov_base = &dummy;
3820 aio.iov_len = 0;
3821 len = 0;
3822 bzero(&msg, sizeof(msg));
3823 msg.msg_iov = &aio;
3824 msg.msg_iovlen = 1;
3825 error = sock_send(nmp->nm_nso->nso_so, &msg, MSG_DONTWAIT, &len);
3826 NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error);
3827 lck_mtx_lock(&nmp->nm_lock);
3828 nmp->nm_sockflags &= ~NMSOCK_POKE;
3829 wakeup(&nmp->nm_sockflags);
3830 lck_mtx_unlock(&nmp->nm_lock);
3831 nfs_is_dead(error, nmp);
3832 }
3833
/*
 * Match an RPC reply with the corresponding request
 *
 * Parses the RPC header of "mrep" to extract the transaction ID (XID),
 * then walks the global outstanding-request queue looking for the request
 * with a matching XID that has not yet received a reply.  On a match, the
 * reply mbuf chain is attached to the request, the UDP congestion window
 * and RTT estimates are updated, and any waiter (or async callback) is
 * woken.  Replies that match no request are counted and dropped.
 *
 * Lock ordering here is nfs_request_mutex -> req->r_mtx -> nmp->nm_lock.
 */
void
nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
{
	struct nfsreq *req;
	struct nfsm_chain nmrep;
	u_int32_t reply = 0, rxid = 0;
	int error = 0, asyncioq, t1;

	bzero(&nmrep, sizeof(nmrep));
	/* Get the xid and check that it is an rpc reply */
	nfsm_chain_dissect_init(error, &nmrep, mrep);
	nfsm_chain_get_32(error, &nmrep, rxid);
	nfsm_chain_get_32(error, &nmrep, reply);
	if (error || (reply != RPC_REPLY)) {
		/* not a parseable RPC reply: count it and toss it */
		OSAddAtomic64(1, &nfsstats.rpcinvalid);
		mbuf_freem(mrep);
		return;
	}

	/*
	 * Loop through the request list to match up the reply.
	 * If no match, just drop it.
	 */
	lck_mtx_lock(nfs_request_mutex);
	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		/* skip requests that already have a reply or a different XID */
		if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
			continue;
		}
		/* looks like we have it, grab lock and double check */
		lck_mtx_lock(&req->r_mtx);
		if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
			lck_mtx_unlock(&req->r_mtx);
			continue;
		}
		/* Found it.. */
		req->r_nmrep = nmrep;
		lck_mtx_lock(&nmp->nm_lock);
		if (nmp->nm_sotype == SOCK_DGRAM) {
			/*
			 * Update congestion window.
			 * Do the additive increase of one rpc/rtt.
			 */
			FSDBG(530, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
			if (nmp->nm_cwnd <= nmp->nm_sent) {
				nmp->nm_cwnd +=
				    ((NFS_CWNDSCALE * NFS_CWNDSCALE) +
				    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
				if (nmp->nm_cwnd > NFS_MAXCWND) {
					nmp->nm_cwnd = NFS_MAXCWND;
				}
			}
			if (req->r_flags & R_CWND) {
				/* this request no longer counts against the window */
				nmp->nm_sent -= NFS_CWNDSCALE;
				req->r_flags &= ~R_CWND;
			}
			if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
				/* congestion window is open, poke the cwnd queue */
				struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
				TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
				req2->r_cchain.tqe_next = NFSREQNOLIST;
				wakeup(req2);
			}
		}
		/*
		 * Update rtt using a gain of 0.125 on the mean
		 * and a gain of 0.25 on the deviation.
		 * (SRTT/SDRTT appear to be kept in scaled fixed point,
		 * hence the >>3 and >>2 — same scheme as the BSD NFS client.)
		 */
		if (req->r_flags & R_TIMING) {
			/*
			 * Since the timer resolution of
			 * NFS_HZ is so coarse, it can often
			 * result in r_rtt == 0. Since
			 * r_rtt == N means that the actual
			 * rtt is between N+dt and N+2-dt ticks,
			 * add 1.
			 */
			if (proct[req->r_procnum] == 0) {
				panic("nfs_request_match_reply: proct[%d] is zero", req->r_procnum);
			}
			t1 = req->r_rtt + 1;
			t1 -= (NFS_SRTT(req) >> 3);
			NFS_SRTT(req) += t1;
			if (t1 < 0) {
				t1 = -t1;
			}
			t1 -= (NFS_SDRTT(req) >> 2);
			NFS_SDRTT(req) += t1;
		}
		/* reply received, so the mount is clearly responding */
		nmp->nm_timeouts = 0;
		lck_mtx_unlock(&nmp->nm_lock);
		/* signal anyone waiting on this request */
		wakeup(req);
		asyncioq = (req->r_callback.rcb_func != NULL);
#if CONFIG_NFS_GSS
		if (nfs_request_using_gss(req)) {
			nfs_gss_clnt_rpcdone(req);
		}
#endif /* CONFIG_NFS_GSS */
		lck_mtx_unlock(&req->r_mtx);
		lck_mtx_unlock(nfs_request_mutex);
		/* if it's an async RPC with a callback, queue it up */
		if (asyncioq) {
			nfs_asyncio_finish(req);
		}
		break;
	}

	if (!req) {
		/* not matched to a request, so drop it. */
		lck_mtx_unlock(nfs_request_mutex);
		OSAddAtomic64(1, &nfsstats.rpcunexpected);
		mbuf_freem(mrep);
	}
}
3951
/*
 * Wait for the reply for a given request...
 * ...potentially resending the request if necessary.
 *
 * Sleeps on the request until a reply mbuf chain is attached (r_nmrep),
 * an error is posted on the request, or the caller is interrupted.
 * When the request is flagged R_MUSTRESEND it is retransmitted from here,
 * except for RPCSEC_GSS requests, which cannot simply be retransmitted
 * (the cred sequence number must be bumped): for those EAGAIN is returned
 * so the caller can rebuild the request.
 *
 * Returns 0 once a reply has arrived, or an error (EINTR, EAGAIN, send
 * failure, or the request's posted error).
 */
int
nfs_wait_reply(struct nfsreq *req)
{
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int error = 0, slpflag, first = 1;

	/* allow the sleep to be interrupted on INTR mounts (unless R_NOINTR) */
	if (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
		slpflag = PCATCH;
	} else {
		slpflag = 0;
	}

	lck_mtx_lock(&req->r_mtx);
	while (!req->r_nmrep.nmc_mhead) {
		/* NULL thread on the first pass — presumably to avoid an immediate
		 * signal check before we've slept at least once; confirm intent */
		if ((error = nfs_sigintr(req->r_nmp, req, first ? NULL : req->r_thread, 0))) {
			break;
		}
		if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
			break;
		}
		/* check if we need to resend */
		if (req->r_flags & R_MUSTRESEND) {
			NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
			req->r_flags |= R_SENDING;
			/* drop r_mtx across the (possibly blocking) send */
			lck_mtx_unlock(&req->r_mtx);
			if (nfs_request_using_gss(req)) {
				/*
				 * It's an RPCSEC_GSS request.
				 * Can't just resend the original request
				 * without bumping the cred sequence number.
				 * Go back and re-build the request.
				 */
				lck_mtx_lock(&req->r_mtx);
				req->r_flags &= ~R_SENDING;
				lck_mtx_unlock(&req->r_mtx);
				return EAGAIN;
			}
			error = nfs_send(req, 1);
			lck_mtx_lock(&req->r_mtx);
			NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, error);
			if (error) {
				break;
			}
			/* a reply may have arrived while r_mtx was dropped */
			if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
				break;
			}
		}
		/* need to poll if we're P_NOREMOTEHANG */
		if (nfs_noremotehang(req->r_thread)) {
			ts.tv_sec = 1;
		}
		msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitreply", &ts);
		first = slpflag = 0;
	}
	lck_mtx_unlock(&req->r_mtx);

	return error;
}
4016
4017 /*
4018 * An NFS request goes something like this:
4019 * (nb: always frees up mreq mbuf list)
4020 * nfs_request_create()
4021 * - allocates a request struct if one is not provided
4022 * - initial fill-in of the request struct
4023 * nfs_request_add_header()
4024 * - add the RPC header
4025 * nfs_request_send()
4026 * - link it into list
4027 * - call nfs_send() for first transmit
4028 * nfs_request_wait()
4029 * - call nfs_wait_reply() to wait for the reply
4030 * nfs_request_finish()
4031 * - break down rpc header and return with error or nfs reply
4032 * pointed to by nmrep.
4033 * nfs_request_rele()
4034 * nfs_request_destroy()
4035 * - clean up the request struct
4036 * - free the request struct if it was allocated by nfs_request_create()
4037 */
4038
4039 /*
4040 * Set up an NFS request struct (allocating if no request passed in).
4041 */
4042 int
4043 nfs_request_create(
4044 nfsnode_t np,
4045 mount_t mp, /* used only if !np */
4046 struct nfsm_chain *nmrest,
4047 int procnum,
4048 thread_t thd,
4049 kauth_cred_t cred,
4050 struct nfsreq **reqp)
4051 {
4052 struct nfsreq *req, *newreq = NULL;
4053 struct nfsmount *nmp;
4054
4055 req = *reqp;
4056 if (!req) {
4057 /* allocate a new NFS request structure */
4058 req = newreq = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
4059 } else {
4060 bzero(req, sizeof(*req));
4061 }
4062 if (req == newreq) {
4063 req->r_flags = R_ALLOCATED;
4064 }
4065
4066 nmp = VFSTONFS(np ? NFSTOMP(np) : mp);
4067 if (nfs_mount_gone(nmp)) {
4068 if (newreq) {
4069 NFS_ZFREE(nfs_req_zone, newreq);
4070 }
4071 return ENXIO;
4072 }
4073 lck_mtx_lock(&nmp->nm_lock);
4074 if ((nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
4075 (nmp->nm_state & NFSSTA_TIMEO)) {
4076 lck_mtx_unlock(&nmp->nm_lock);
4077 mbuf_freem(nmrest->nmc_mhead);
4078 nmrest->nmc_mhead = NULL;
4079 if (newreq) {
4080 NFS_ZFREE(nfs_req_zone, newreq);
4081 }
4082 return ENXIO;
4083 }
4084
4085 if ((nmp->nm_vers != NFS_VER4) && (procnum >= 0) && (procnum < NFS_NPROCS)) {
4086 OSAddAtomic64(1, &nfsstats.rpccnt[procnum]);
4087 }
4088 if ((nmp->nm_vers == NFS_VER4) && (procnum != NFSPROC4_COMPOUND) && (procnum != NFSPROC4_NULL)) {
4089 panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum);
4090 }
4091
4092 lck_mtx_init(&req->r_mtx, nfs_request_grp, LCK_ATTR_NULL);
4093 req->r_nmp = nmp;
4094 nmp->nm_ref++;
4095 req->r_np = np;
4096 req->r_thread = thd;
4097 if (!thd) {
4098 req->r_flags |= R_NOINTR;
4099 }
4100 if (IS_VALID_CRED(cred)) {
4101 kauth_cred_ref(cred);
4102 req->r_cred = cred;
4103 }
4104 req->r_procnum = procnum;
4105 if (proct[procnum] > 0) {
4106 req->r_flags |= R_TIMING;
4107 }
4108 req->r_nmrep.nmc_mhead = NULL;
4109 SLIST_INIT(&req->r_gss_seqlist);
4110 req->r_achain.tqe_next = NFSREQNOLIST;
4111 req->r_rchain.tqe_next = NFSREQNOLIST;
4112 req->r_cchain.tqe_next = NFSREQNOLIST;
4113
4114 /* set auth flavor to use for request */
4115 if (!req->r_cred) {
4116 req->r_auth = RPCAUTH_NONE;
4117 } else if (req->r_np && (req->r_np->n_auth != RPCAUTH_INVALID)) {
4118 req->r_auth = req->r_np->n_auth;
4119 } else {
4120 req->r_auth = nmp->nm_auth;
4121 }
4122
4123 lck_mtx_unlock(&nmp->nm_lock);
4124
4125 /* move the request mbuf chain to the nfsreq */
4126 req->r_mrest = nmrest->nmc_mhead;
4127 nmrest->nmc_mhead = NULL;
4128
4129 req->r_flags |= R_INITTED;
4130 req->r_refs = 1;
4131 if (newreq) {
4132 *reqp = req;
4133 }
4134 return 0;
4135 }
4136
/*
 * Clean up and free an NFS request structure.
 *
 * Undoes everything nfs_request_create() (and subsequent queueing) did:
 * removes the request from the global queue and from any async I/O,
 * resend, or congestion-window queue it may still be on; releases the
 * congestion-window slot; frees the request/reply mbuf chains; drops the
 * credential, GSS state, and mount references; and finally frees the
 * struct itself if it was allocated by nfs_request_create().
 */
void
nfs_request_destroy(struct nfsreq *req)
{
	struct nfsmount *nmp;
	int clearjbtimeo = 0;

#if CONFIG_NFS_GSS
	struct gss_seq *gsp, *ngsp;
#endif

	/* nothing to do for a request that was never initialized */
	if (!req || !(req->r_flags & R_INITTED)) {
		return;
	}
	nmp = req->r_nmp;
	req->r_flags &= ~R_INITTED;
	if (req->r_lflags & RL_QUEUED) {
		nfs_reqdequeue(req);
	}

	if (req->r_achain.tqe_next != NFSREQNOLIST) {
		/*
		 * Still on an async I/O queue?
		 * %%% But which one, we may be on a local iod.
		 */
		lck_mtx_lock(nfsiod_mutex);
		/* re-check under the lock; an iod may have taken it meanwhile */
		if (nmp && req->r_achain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_iodq, req, r_achain);
			req->r_achain.tqe_next = NFSREQNOLIST;
		}
		lck_mtx_unlock(nfsiod_mutex);
	}

	lck_mtx_lock(&req->r_mtx);
	if (nmp) {
		lck_mtx_lock(&nmp->nm_lock);
		if (req->r_flags & R_CWND) {
			/* Decrement the outstanding request count. */
			req->r_flags &= ~R_CWND;
			nmp->nm_sent -= NFS_CWNDSCALE;
			if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
				/* congestion window is open, poke the cwnd queue */
				struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
				TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
				req2->r_cchain.tqe_next = NFSREQNOLIST;
				wakeup(req2);
			}
		}
		/* XXX should we just remove this conditional, we should have a reference if we're resending */
		if ((req->r_flags & R_RESENDQ) && req->r_rchain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
			req->r_flags &= ~R_RESENDQ;
			req->r_rchain.tqe_next = NFSREQNOLIST;
		}
		if (req->r_cchain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
			req->r_cchain.tqe_next = NFSREQNOLIST;
		}
		if (req->r_flags & R_JBTPRINTFMSG) {
			/* this request contributed to the jukebox-timeout message count */
			req->r_flags &= ~R_JBTPRINTFMSG;
			nmp->nm_jbreqs--;
			clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}
	lck_mtx_unlock(&req->r_mtx);

	if (clearjbtimeo) {
		nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
	}
	/* r_mhead includes r_mrest, so free whichever head we have (not both) */
	if (req->r_mhead) {
		mbuf_freem(req->r_mhead);
	} else if (req->r_mrest) {
		mbuf_freem(req->r_mrest);
	}
	if (req->r_nmrep.nmc_mhead) {
		mbuf_freem(req->r_nmrep.nmc_mhead);
	}
	if (IS_VALID_CRED(req->r_cred)) {
		kauth_cred_unref(&req->r_cred);
	}
#if CONFIG_NFS_GSS
	if (nfs_request_using_gss(req)) {
		nfs_gss_clnt_rpcdone(req);
	}
	SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp)
	FREE(gsp, M_TEMP);
	if (req->r_gss_ctx) {
		nfs_gss_clnt_ctx_unref(req);
	}
#endif /* CONFIG_NFS_GSS */
	if (req->r_wrongsec) {
		FREE(req->r_wrongsec, M_TEMP);
	}
	if (nmp) {
		/* drop the mount reference taken in nfs_request_create() */
		nfs_mount_rele(nmp);
	}
	lck_mtx_destroy(&req->r_mtx, nfs_request_grp);
	if (req->r_flags & R_ALLOCATED) {
		NFS_ZFREE(nfs_req_zone, req);
	}
}
4241
4242 void
4243 nfs_request_ref(struct nfsreq *req, int locked)
4244 {
4245 if (!locked) {
4246 lck_mtx_lock(&req->r_mtx);
4247 }
4248 if (req->r_refs <= 0) {
4249 panic("nfsreq reference error");
4250 }
4251 req->r_refs++;
4252 if (!locked) {
4253 lck_mtx_unlock(&req->r_mtx);
4254 }
4255 }
4256
4257 void
4258 nfs_request_rele(struct nfsreq *req)
4259 {
4260 int destroy;
4261
4262 lck_mtx_lock(&req->r_mtx);
4263 if (req->r_refs <= 0) {
4264 panic("nfsreq reference underflow");
4265 }
4266 req->r_refs--;
4267 destroy = (req->r_refs == 0);
4268 lck_mtx_unlock(&req->r_mtx);
4269 if (destroy) {
4270 nfs_request_destroy(req);
4271 }
4272 }
4273
4274
4275 /*
4276 * Add an (updated) RPC header with authorization to an NFS request.
4277 */
4278 int
4279 nfs_request_add_header(struct nfsreq *req)
4280 {
4281 struct nfsmount *nmp;
4282 int error = 0;
4283 mbuf_t m;
4284
4285 /* free up any previous header */
4286 if ((m = req->r_mhead)) {
4287 while (m && (m != req->r_mrest)) {
4288 m = mbuf_free(m);
4289 }
4290 req->r_mhead = NULL;
4291 }
4292
4293 nmp = req->r_nmp;
4294 if (nfs_mount_gone(nmp)) {
4295 return ENXIO;
4296 }
4297
4298 error = nfsm_rpchead(req, req->r_mrest, &req->r_xid, &req->r_mhead);
4299 if (error) {
4300 return error;
4301 }
4302
4303 req->r_mreqlen = mbuf_pkthdr_len(req->r_mhead);
4304 nmp = req->r_nmp;
4305 if (nfs_mount_gone(nmp)) {
4306 return ENXIO;
4307 }
4308 lck_mtx_lock(&nmp->nm_lock);
4309 if (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) {
4310 req->r_retry = nmp->nm_retry;
4311 } else {
4312 req->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
4313 }
4314 lck_mtx_unlock(&nmp->nm_lock);
4315
4316 return error;
4317 }
4318
4319
/*
 * Queue an NFS request up and send it out.
 *
 * Marks the request as sending, records its start time (used for retry
 * and tprintf pacing), links it onto the tail of the global outstanding
 * request queue, makes sure the request timeout timer is running, and
 * hands the request to nfs_send().  "wait" is passed through to
 * nfs_send() to control whether the send may block.
 */
int
nfs_request_send(struct nfsreq *req, int wait)
{
	struct nfsmount *nmp;
	struct timeval now;

	lck_mtx_lock(&req->r_mtx);
	req->r_flags |= R_SENDING;
	lck_mtx_unlock(&req->r_mtx);

	lck_mtx_lock(nfs_request_mutex);

	nmp = req->r_nmp;
	if (nfs_mount_gone(nmp)) {
		lck_mtx_unlock(nfs_request_mutex);
		return ENXIO;
	}

	microuptime(&now);
	if (!req->r_start) {
		req->r_start = now.tv_sec;
		/* back-date r_lastmsg so the first tprintf fires after the initial delay */
		req->r_lastmsg = now.tv_sec -
		    ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
	}

	OSAddAtomic64(1, &nfsstats.rpcrequests);

	/*
	 * Make sure the request is not in the queue.
	 * (Being queued already indicates a bookkeeping bug: panic on
	 * DEVELOPMENT kernels, quietly recover on release kernels.)
	 */
	if (req->r_lflags & RL_QUEUED) {
#if DEVELOPMENT
		panic("nfs_request_send: req %p is already in global requests queue", req);
#else
		TAILQ_REMOVE(&nfs_reqq, req, r_chain);
		req->r_lflags &= ~RL_QUEUED;
#endif /* DEVELOPMENT */
	}

	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.
	 * Make sure that the request queue timer is running
	 * to check for possible request timeout.
	 */
	TAILQ_INSERT_TAIL(&nfs_reqq, req, r_chain);
	req->r_lflags |= RL_QUEUED;
	if (!nfs_request_timer_on) {
		nfs_request_timer_on = 1;
		nfs_interval_timer_start(nfs_request_timer_call,
		    NFS_REQUESTDELAY);
	}
	lck_mtx_unlock(nfs_request_mutex);

	/* Send the request... */
	return nfs_send(req, wait);
}
4380
4381 /*
4382 * Call nfs_wait_reply() to wait for the reply.
4383 */
4384 void
4385 nfs_request_wait(struct nfsreq *req)
4386 {
4387 req->r_error = nfs_wait_reply(req);
4388 }
4389
/*
 * Finish up an NFS request by dequeueing it and
 * doing the initial NFS request reply processing.
 *
 * Parses the RPC-level reply header (reply status, verifier, accepted
 * status), handles RPC-level rejections and GSS credential problems,
 * implements the NFSv3+ jukebox (NFSERR_TRYLATER) delay/retry and the
 * NFSv4 WRONGSEC flavor renegotiation, and on success hands the
 * remaining reply (positioned at the NFS status) back via *nmrepp with
 * the NFS status in *status.  Setting R_RESTART and returning 0 tells
 * the caller's loop (e.g. nfs_request2) to rebuild and resend.
 */
int
nfs_request_finish(
	struct nfsreq *req,
	struct nfsm_chain *nmrepp,
	int *status)
{
	struct nfsmount *nmp;
	mbuf_t mrep;
	int verf_type = 0;
	uint32_t verf_len = 0;
	uint32_t reply_status = 0;
	uint32_t rejected_status = 0;
	uint32_t auth_status = 0;
	uint32_t accepted_status = 0;
	struct nfsm_chain nmrep;
	int error, clearjbtimeo;

	error = req->r_error;

	if (nmrepp) {
		nmrepp->nmc_mhead = NULL;
	}

	/* RPC done, unlink the request. */
	nfs_reqdequeue(req);

	mrep = req->r_nmrep.nmc_mhead;

	nmp = req->r_nmp;

	if ((req->r_flags & R_CWND) && nmp) {
		/*
		 * Decrement the outstanding request count.
		 */
		req->r_flags &= ~R_CWND;
		lck_mtx_lock(&nmp->nm_lock);
		FSDBG(273, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
		nmp->nm_sent -= NFS_CWNDSCALE;
		if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
			/* congestion window is open, poke the cwnd queue */
			struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
			TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
			req2->r_cchain.tqe_next = NFSREQNOLIST;
			wakeup(req2);
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

#if CONFIG_NFS_GSS
	if (nfs_request_using_gss(req)) {
		/*
		 * If the request used an RPCSEC_GSS credential
		 * then reset its sequence number bit in the
		 * request window.
		 */
		nfs_gss_clnt_rpcdone(req);

		/*
		 * If we need to re-send, go back and re-build the
		 * request based on a new sequence number.
		 * Note that we're using the original XID.
		 */
		if (error == EAGAIN) {
			req->r_error = 0;
			if (mrep) {
				mbuf_freem(mrep);
			}
			error = nfs_gss_clnt_args_restore(req); // remove any trailer mbufs
			req->r_nmrep.nmc_mhead = NULL;
			req->r_flags |= R_RESTART;
			if (error == ENEEDAUTH) {
				req->r_xid = 0; // get a new XID
				error = 0;
			}
			goto nfsmout;
		}
	}
#endif /* CONFIG_NFS_GSS */

	/*
	 * If there was a successful reply, make sure to mark the mount as up.
	 * If a tprintf message was given (or if this is a timed-out soft mount)
	 * then post a tprintf message indicating the server is alive again.
	 */
	if (!error) {
		if ((req->r_flags & R_TPRINTFMSG) ||
		    (nmp && (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) &&
		    ((nmp->nm_state & (NFSSTA_TIMEO | NFSSTA_FORCE | NFSSTA_DEAD)) == NFSSTA_TIMEO))) {
			nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, "is alive again");
		} else {
			nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, NULL);
		}
	}
	if (!error && !nmp) {
		error = ENXIO;
	}
	nfsmout_if(error);

	/*
	 * break down the RPC header and check if ok
	 */
	nmrep = req->r_nmrep;
	nfsm_chain_get_32(error, &nmrep, reply_status);
	nfsmout_if(error);
	if (reply_status == RPC_MSGDENIED) {
		nfsm_chain_get_32(error, &nmrep, rejected_status);
		nfsmout_if(error);
		if (rejected_status == RPC_MISMATCH) {
			/* server doesn't speak our RPC version */
			error = ENOTSUP;
			goto nfsmout;
		}
		nfsm_chain_get_32(error, &nmrep, auth_status);
		nfsmout_if(error);
		switch (auth_status) {
#if CONFIG_NFS_GSS
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			/*
			 * An RPCSEC_GSS cred or context problem.
			 * We can't use it anymore.
			 * Restore the args, renew the context
			 * and set up for a resend.
			 */
			error = nfs_gss_clnt_args_restore(req);
			if (error && error != ENEEDAUTH) {
				break;
			}

			if (!error) {
				error = nfs_gss_clnt_ctx_renew(req);
				if (error) {
					break;
				}
			}
			mbuf_freem(mrep);
			req->r_nmrep.nmc_mhead = NULL;
			req->r_xid = 0; // get a new XID
			req->r_flags |= R_RESTART;
			goto nfsmout;
#endif /* CONFIG_NFS_GSS */
		default:
			/* any other auth failure is terminal */
			error = EACCES;
			break;
		}
		goto nfsmout;
	}

	/* Now check the verifier */
	nfsm_chain_get_32(error, &nmrep, verf_type); // verifier flavor
	nfsm_chain_get_32(error, &nmrep, verf_len); // verifier length
	nfsmout_if(error);

	switch (req->r_auth) {
	case RPCAUTH_NONE:
	case RPCAUTH_SYS:
		/* Any AUTH_SYS verifier is ignored */
		if (verf_len > 0) {
			nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
		}
		nfsm_chain_get_32(error, &nmrep, accepted_status);
		break;
#if CONFIG_NFS_GSS
	case RPCAUTH_KRB5:
	case RPCAUTH_KRB5I:
	case RPCAUTH_KRB5P:
		/* GSS verifiers must actually be checked */
		error = nfs_gss_clnt_verf_get(req, &nmrep,
		    verf_type, verf_len, &accepted_status);
		break;
#endif /* CONFIG_NFS_GSS */
	}
	nfsmout_if(error);

	switch (accepted_status) {
	case RPC_SUCCESS:
		if (req->r_procnum == NFSPROC_NULL) {
			/*
			 * The NFS null procedure is unique,
			 * in not returning an NFS status.
			 */
			*status = NFS_OK;
		} else {
			nfsm_chain_get_32(error, &nmrep, *status);
			nfsmout_if(error);
		}

		if ((nmp->nm_vers != NFS_VER2) && (*status == NFSERR_TRYLATER)) {
			/*
			 * It's a JUKEBOX error - delay and try again
			 */
			int delay, slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;

			mbuf_freem(mrep);
			req->r_nmrep.nmc_mhead = NULL;
			if ((req->r_delay >= 30) && !(nmp->nm_state & NFSSTA_MOUNTED)) {
				/* we're not yet completely mounted and */
				/* we can't complete an RPC, so we fail */
				OSAddAtomic64(1, &nfsstats.rpctimeouts);
				nfs_softterm(req);
				error = req->r_error;
				goto nfsmout;
			}
			/* exponential backoff, capped at 30 seconds */
			req->r_delay = !req->r_delay ? NFS_TRYLATERDEL : (req->r_delay * 2);
			if (req->r_delay > 30) {
				req->r_delay = 30;
			}
			if (nmp->nm_tprintf_initial_delay && (req->r_delay >= nmp->nm_tprintf_initial_delay)) {
				if (!(req->r_flags & R_JBTPRINTFMSG)) {
					req->r_flags |= R_JBTPRINTFMSG;
					lck_mtx_lock(&nmp->nm_lock);
					nmp->nm_jbreqs++;
					lck_mtx_unlock(&nmp->nm_lock);
				}
				nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_JUKEBOXTIMEO,
				    "resource temporarily unavailable (jukebox)", 0);
			}
			if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (req->r_delay == 30) &&
			    !(req->r_flags & R_NOINTR)) {
				/* for soft mounts, just give up after a short while */
				OSAddAtomic64(1, &nfsstats.rpctimeouts);
				nfs_softterm(req);
				error = req->r_error;
				goto nfsmout;
			}
			delay = req->r_delay;
			if (req->r_callback.rcb_func) {
				/* async request: schedule the resend instead of sleeping */
				struct timeval now;
				microuptime(&now);
				req->r_resendtime = now.tv_sec + delay;
			} else {
				do {
					if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
						goto nfsmout;
					}
					tsleep(nfs_request_finish, PSOCK | slpflag, "nfs_jukebox_trylater", hz);
					slpflag = 0;
				} while (--delay > 0);
			}
			req->r_xid = 0; // get a new XID
			req->r_flags |= R_RESTART;
			req->r_start = 0;
			FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_TRYLATER);
			return 0;
		}

		if (req->r_flags & R_JBTPRINTFMSG) {
			/* jukebox condition cleared; maybe post "resource available" */
			req->r_flags &= ~R_JBTPRINTFMSG;
			lck_mtx_lock(&nmp->nm_lock);
			nmp->nm_jbreqs--;
			clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_up(nmp, req->r_thread, clearjbtimeo, "resource available again");
		}

#if CONFIG_NFS4
		if ((nmp->nm_vers >= NFS_VER4) && (*status == NFSERR_WRONGSEC)) {
			/*
			 * Hmmm... we need to try a different security flavor.
			 * The first time a request hits this, we will allocate an array
			 * to track flavors to try. We fill the array with the mount's
			 * preferred flavors or the server's preferred flavors or just the
			 * flavors we support.
			 */
			uint32_t srvflavors[NX_MAX_SEC_FLAVORS];
			int srvcount, i, j;

			/* Call SECINFO to try to get list of flavors from server. */
			srvcount = NX_MAX_SEC_FLAVORS;
			nfs4_secinfo_rpc(nmp, &req->r_secinfo, req->r_cred, srvflavors, &srvcount);

			if (!req->r_wrongsec) {
				/* first time... set up flavor array */
				MALLOC(req->r_wrongsec, uint32_t*, NX_MAX_SEC_FLAVORS * sizeof(uint32_t), M_TEMP, M_WAITOK);
				if (!req->r_wrongsec) {
					error = EACCES;
					goto nfsmout;
				}
				i = 0;
				if (nmp->nm_sec.count) { /* use the mount's preferred list of flavors */
					for (; i < nmp->nm_sec.count; i++) {
						req->r_wrongsec[i] = nmp->nm_sec.flavors[i];
					}
				} else if (srvcount) { /* otherwise use the server's list of flavors */
					for (; i < srvcount; i++) {
						req->r_wrongsec[i] = srvflavors[i];
					}
				} else { /* otherwise, just try the flavors we support. */
					req->r_wrongsec[i++] = RPCAUTH_KRB5P;
					req->r_wrongsec[i++] = RPCAUTH_KRB5I;
					req->r_wrongsec[i++] = RPCAUTH_KRB5;
					req->r_wrongsec[i++] = RPCAUTH_SYS;
					req->r_wrongsec[i++] = RPCAUTH_NONE;
				}
				for (; i < NX_MAX_SEC_FLAVORS; i++) { /* invalidate any remaining slots */
					req->r_wrongsec[i] = RPCAUTH_INVALID;
				}
			}

			/* clear the current flavor from the list */
			for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
				if (req->r_wrongsec[i] == req->r_auth) {
					req->r_wrongsec[i] = RPCAUTH_INVALID;
				}
			}

			/* find the next flavor to try */
			for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
				if (req->r_wrongsec[i] != RPCAUTH_INVALID) {
					if (!srvcount) { /* no server list, just try it */
						break;
					}
					/* check that it's in the server's list */
					for (j = 0; j < srvcount; j++) {
						if (req->r_wrongsec[i] == srvflavors[j]) {
							break;
						}
					}
					if (j < srvcount) { /* found */
						break;
					}
					/* not found in server list */
					req->r_wrongsec[i] = RPCAUTH_INVALID;
				}
			}
			if (i == NX_MAX_SEC_FLAVORS) {
				/* nothing left to try! */
				error = EACCES;
				goto nfsmout;
			}

			/* retry with the next auth flavor */
			req->r_auth = req->r_wrongsec[i];
			req->r_xid = 0; // get a new XID
			req->r_flags |= R_RESTART;
			req->r_start = 0;
			FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_WRONGSEC);
			return 0;
		}
		if ((nmp->nm_vers >= NFS_VER4) && req->r_wrongsec) {
			/*
			 * We renegotiated security for this request; so update the
			 * default security flavor for the associated node.
			 */
			if (req->r_np) {
				req->r_np->n_auth = req->r_auth;
			}
		}
#endif /* CONFIG_NFS4 */
		if (*status == NFS_OK) {
			/*
			 * Successful NFS request
			 */
			*nmrepp = nmrep;
			req->r_nmrep.nmc_mhead = NULL;
			break;
		}
		/* Got an NFS error of some kind */

		/*
		 * If the File Handle was stale, invalidate the
		 * lookup cache, just in case.
		 */
		if ((*status == ESTALE) && req->r_np) {
			cache_purge(NFSTOV(req->r_np));
			/* if monitored, also send delete event */
			if (vnode_ismonitored(NFSTOV(req->r_np))) {
				nfs_vnode_notify(req->r_np, (VNODE_EVENT_ATTRIB | VNODE_EVENT_DELETE));
			}
		}
		/* v2 replies carry no more useful data; v3+ callers may want the rest */
		if (nmp->nm_vers == NFS_VER2) {
			mbuf_freem(mrep);
		} else {
			*nmrepp = nmrep;
		}
		req->r_nmrep.nmc_mhead = NULL;
		error = 0;
		break;
	case RPC_PROGUNAVAIL:
		error = EPROGUNAVAIL;
		break;
	case RPC_PROGMISMATCH:
		error = ERPCMISMATCH;
		break;
	case RPC_PROCUNAVAIL:
		error = EPROCUNAVAIL;
		break;
	case RPC_GARBAGE:
		error = EBADRPC;
		break;
	case RPC_SYSTEM_ERR:
	default:
		error = EIO;
		break;
	}
nfsmout:
	if (req->r_flags & R_JBTPRINTFMSG) {
		req->r_flags &= ~R_JBTPRINTFMSG;
		lck_mtx_lock(&nmp->nm_lock);
		nmp->nm_jbreqs--;
		clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
		lck_mtx_unlock(&nmp->nm_lock);
		if (clearjbtimeo) {
			nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
		}
	}
	FSDBG(273, R_XID32(req->r_xid), nmp, req,
	    (!error && (*status == NFS_OK)) ? 0xf0f0f0f0 : error);
	return error;
}
4802
4803 /*
4804 * NFS request using a GSS/Kerberos security flavor?
4805 */
4806 int
4807 nfs_request_using_gss(struct nfsreq *req)
4808 {
4809 if (!req->r_gss_ctx) {
4810 return 0;
4811 }
4812 switch (req->r_auth) {
4813 case RPCAUTH_KRB5:
4814 case RPCAUTH_KRB5I:
4815 case RPCAUTH_KRB5P:
4816 return 1;
4817 }
4818 return 0;
4819 }
4820
4821 /*
4822 * Perform an NFS request synchronously.
4823 */
4824
4825 int
4826 nfs_request(
4827 nfsnode_t np,
4828 mount_t mp, /* used only if !np */
4829 struct nfsm_chain *nmrest,
4830 int procnum,
4831 vfs_context_t ctx,
4832 struct nfsreq_secinfo_args *si,
4833 struct nfsm_chain *nmrepp,
4834 u_int64_t *xidp,
4835 int *status)
4836 {
4837 return nfs_request2(np, mp, nmrest, procnum,
4838 vfs_context_thread(ctx), vfs_context_ucred(ctx),
4839 si, 0, nmrepp, xidp, status);
4840 }
4841
4842 int
4843 nfs_request2(
4844 nfsnode_t np,
4845 mount_t mp, /* used only if !np */
4846 struct nfsm_chain *nmrest,
4847 int procnum,
4848 thread_t thd,
4849 kauth_cred_t cred,
4850 struct nfsreq_secinfo_args *si,
4851 int flags,
4852 struct nfsm_chain *nmrepp,
4853 u_int64_t *xidp,
4854 int *status)
4855 {
4856 struct nfsreq *req;
4857 int error;
4858
4859 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
4860 if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) {
4861 goto out_free;
4862 }
4863 req->r_flags |= (flags & (R_OPTMASK | R_SOFT));
4864 if (si) {
4865 req->r_secinfo = *si;
4866 }
4867
4868 FSDBG_TOP(273, R_XID32(req->r_xid), np, procnum, 0);
4869 do {
4870 req->r_error = 0;
4871 req->r_flags &= ~R_RESTART;
4872 if ((error = nfs_request_add_header(req))) {
4873 break;
4874 }
4875 if (xidp) {
4876 *xidp = req->r_xid;
4877 }
4878 if ((error = nfs_request_send(req, 1))) {
4879 break;
4880 }
4881 nfs_request_wait(req);
4882 if ((error = nfs_request_finish(req, nmrepp, status))) {
4883 break;
4884 }
4885 } while (req->r_flags & R_RESTART);
4886
4887 FSDBG_BOT(273, R_XID32(req->r_xid), np, procnum, error);
4888 nfs_request_rele(req);
4889 out_free:
4890 NFS_ZFREE(nfs_req_zone, req);
4891 return error;
4892 }
4893
4894
#if CONFIG_NFS_GSS
/*
 * Set up a new null proc request to exchange GSS context tokens with the
 * server. Associate the context that we are setting up with the request that we
 * are sending.
 *
 * Runs the same build/send/wait/finish loop as nfs_request2(), but for
 * NFSPROC_NULL only, holding a reference on the supplied GSS context for
 * the duration.  For a context-destroy advisory we don't wait for a
 * reply at all (a dead server would hang us).
 */

int
nfs_request_gss(
	mount_t mp,
	struct nfsm_chain *nmrest,
	thread_t thd,
	kauth_cred_t cred,
	int flags,
	struct nfs_gss_clnt_ctx *cp, /* Set to gss context to renew or setup */
	struct nfsm_chain *nmrepp,
	int *status)
{
	struct nfsreq *req;
	int error, wait = 1;

	req = zalloc_flags(nfs_req_zone, Z_WAITOK);
	if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req))) {
		goto out_free;
	}
	req->r_flags |= (flags & R_OPTMASK);

	if (cp == NULL) {
		printf("nfs_request_gss request has no context\n");
		nfs_request_rele(req);
		error = NFSERR_EAUTH;
		goto out_free;
	}
	/* pin the GSS context to this request; released below */
	nfs_gss_clnt_ctx_ref(req, cp);

	/*
	 * Don't wait for a reply to a context destroy advisory
	 * to avoid hanging on a dead server.
	 */
	if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) {
		wait = 0;
	}

	FSDBG_TOP(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, 0);
	do {
		req->r_error = 0;
		req->r_flags &= ~R_RESTART;
		if ((error = nfs_request_add_header(req))) {
			break;
		}

		if ((error = nfs_request_send(req, wait))) {
			break;
		}
		/* fire-and-forget for context destroy: don't wait for the reply */
		if (!wait) {
			break;
		}

		nfs_request_wait(req);
		if ((error = nfs_request_finish(req, nmrepp, status))) {
			break;
		}
	} while (req->r_flags & R_RESTART);

	FSDBG_BOT(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, error);

	nfs_gss_clnt_ctx_unref(req);
	nfs_request_rele(req);
out_free:
	NFS_ZFREE(nfs_req_zone, req);
	return error;
}
#endif /* CONFIG_NFS_GSS */
4968
/*
 * Create and start an asynchronous NFS request.
 *
 * Builds a request from the given arguments, sends it, and — when the caller
 * supplied a completion callback (cb->rcb_func) — waits until the request has
 * actually been handed to the socket layer (R_SENT) before returning.
 *
 * Returns 0 on success (the request continues asynchronously; the callback
 * or nfs_request_async_finish() consumes it), or an errno if the request
 * could not be created or sent.  *reqp receives the request.
 *
 * Reference notes: an extra reference is taken for callback-style requests
 * before sending (released here on send failure, or later by the callback
 * path); the final nfs_request_rele() below drops the creation reference
 * when the caller won't be calling nfs_request_async_finish().
 */
int
nfs_request_async(
	nfsnode_t np,
	mount_t mp,     /* used only if !np */
	struct nfsm_chain *nmrest,
	int procnum,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_secinfo_args *si,
	int flags,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsreq *req;
	struct nfsmount *nmp;
	int error, sent;

	error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, reqp);
	req = *reqp;
	FSDBG(274, (req ? R_XID32(req->r_xid) : 0), np, procnum, error);
	if (error) {
		return error;
	}
	req->r_flags |= (flags & R_OPTMASK);
	req->r_flags |= R_ASYNC;
	if (si) {
		req->r_secinfo = *si;
	}
	if (cb) {
		req->r_callback = *cb;
	}
	error = nfs_request_add_header(req);
	if (!error) {
		req->r_flags |= R_WAITSENT;
		if (req->r_callback.rcb_func) {
			/* extra reference held across the async send/callback */
			nfs_request_ref(req, 0);
		}
		error = nfs_request_send(req, 1);
		lck_mtx_lock(&req->r_mtx);
		if (!error && !(req->r_flags & R_SENT) && req->r_callback.rcb_func) {
			/* make sure to wait until this async I/O request gets sent */
			int slpflag = (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
			struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
			while (!(req->r_flags & R_SENT)) {
				nmp = req->r_nmp;
				if ((req->r_flags & R_RESENDQ) && !nfs_mount_gone(nmp)) {
					lck_mtx_lock(&nmp->nm_lock);
					/* re-check R_RESENDQ under nm_lock — it may have changed while unlocked */
					if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
						/*
						 * It's not going to get off the resend queue if we're in recovery.
						 * So, just take it off ourselves.  We could be holding mount state
						 * busy and thus holding up the start of recovery.
						 */
						TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
						req->r_flags &= ~R_RESENDQ;
						req->r_rchain.tqe_next = NFSREQNOLIST;
						lck_mtx_unlock(&nmp->nm_lock);
						req->r_flags |= R_SENDING;
						/* drop r_mtx while sending; nfs_send takes its own locks */
						lck_mtx_unlock(&req->r_mtx);
						error = nfs_send(req, 1);
						/* Remove the R_RESENDQ reference */
						nfs_request_rele(req);
						lck_mtx_lock(&req->r_mtx);
						if (error) {
							break;
						}
						continue;
					}
					lck_mtx_unlock(&nmp->nm_lock);
				}
				if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
					break;
				}
				/* bounded sleep: re-evaluate every 2 seconds even without a wakeup */
				msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitsent", &ts);
				slpflag = 0;
			}
		}
		sent = req->r_flags & R_SENT;
		lck_mtx_unlock(&req->r_mtx);
		if (error && req->r_callback.rcb_func && !sent) {
			/* send failed: drop the callback reference taken above */
			nfs_request_rele(req);
		}
	}
	FSDBG(274, R_XID32(req->r_xid), np, procnum, error);
	if (error || req->r_callback.rcb_func) {
		/* caller won't call nfs_request_async_finish(): drop the creation ref */
		nfs_request_rele(req);
	}

	return error;
}
5062
/*
 * Wait for and finish an asynchronous NFS request.
 *
 * Blocks until the request is off the resend queue, waits for the reply,
 * and finishes it (parsing the reply into *nmrepp with RPC status in
 * *status).  If the request must be restarted and it is a callback-style
 * async request, it is re-queued and EINPROGRESS is returned.  On return
 * the caller's reference is dropped; *xidp (if non-NULL) receives the XID.
 */
int
nfs_request_async_finish(
	struct nfsreq *req,
	struct nfsm_chain *nmrepp,
	u_int64_t *xidp,
	int *status)
{
	int error = 0, asyncio = req->r_callback.rcb_func ? 1 : 0;
	struct nfsmount *nmp;

	lck_mtx_lock(&req->r_mtx);
	if (!asyncio) {
		req->r_flags |= R_ASYNCWAIT;
	}
	while (req->r_flags & R_RESENDQ) {  /* wait until the request is off the resend queue */
		struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

		if ((nmp = req->r_nmp)) {
			lck_mtx_lock(&nmp->nm_lock);
			/* re-check R_RESENDQ under nm_lock before dequeueing ourselves */
			if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
				/*
				 * It's not going to get off the resend queue if we're in recovery.
				 * So, just take it off ourselves.  We could be holding mount state
				 * busy and thus holding up the start of recovery.
				 */
				TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
				req->r_flags &= ~R_RESENDQ;
				req->r_rchain.tqe_next = NFSREQNOLIST;
				/* Remove the R_RESENDQ reference */
				assert(req->r_refs > 0);
				req->r_refs--;
				lck_mtx_unlock(&nmp->nm_lock);
				break;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
			break;
		}
		/* bounded 2-second sleep so we periodically re-check the queue state */
		msleep(req, &req->r_mtx, PZERO - 1, "nfsresendqwait", &ts);
	}
	lck_mtx_unlock(&req->r_mtx);

	if (!error) {
		nfs_request_wait(req);
		error = nfs_request_finish(req, nmrepp, status);
	}

	/* Handle server-requested restarts (e.g. after recovery). */
	while (!error && (req->r_flags & R_RESTART)) {
		if (asyncio) {
			assert(req->r_achain.tqe_next == NFSREQNOLIST);
			lck_mtx_lock(&req->r_mtx);
			req->r_flags &= ~R_IOD;
			if (req->r_resendtime) {  /* send later */
				nfs_asyncio_resend(req);
				lck_mtx_unlock(&req->r_mtx);
				return EINPROGRESS;
			}
			lck_mtx_unlock(&req->r_mtx);
		}
		req->r_error = 0;
		req->r_flags &= ~R_RESTART;
		if ((error = nfs_request_add_header(req))) {
			break;
		}
		if ((error = nfs_request_send(req, !asyncio))) {
			break;
		}
		if (asyncio) {
			return EINPROGRESS;
		}
		nfs_request_wait(req);
		if ((error = nfs_request_finish(req, nmrepp, status))) {
			break;
		}
	}
	if (xidp) {
		*xidp = req->r_xid;
	}

	FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, error);
	nfs_request_rele(req);
	return error;
}
5150
/*
 * Cancel a pending asynchronous NFS request.
 *
 * Simply drops the caller's reference; the request is torn down once any
 * remaining references (timer, socket code) are released.
 */
void
nfs_request_async_cancel(struct nfsreq *req)
{
	FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, 0xD1ED1E);
	nfs_request_rele(req);
}
5160
/*
 * Flag a request as being terminated.
 *
 * Marks the request R_SOFTTERM with ETIMEDOUT, and if it was consuming
 * congestion-window space, returns that space and wakes the next request
 * waiting on the congestion-window queue.
 */
void
nfs_softterm(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	req->r_flags |= R_SOFTTERM;
	req->r_error = ETIMEDOUT;
	if (!(req->r_flags & R_CWND) || nfs_mount_gone(nmp)) {
		/* not holding cwnd space (or mount is gone) — nothing to give back */
		return;
	}
	/* update congestion window */
	req->r_flags &= ~R_CWND;
	lck_mtx_lock(&nmp->nm_lock);
	FSDBG(532, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
	nmp->nm_sent -= NFS_CWNDSCALE;
	if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
		/* congestion window is open, poke the cwnd queue */
		struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
		TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
		req2->r_cchain.tqe_next = NFSREQNOLIST;
		wakeup(req2);
	}
	lck_mtx_unlock(&nmp->nm_lock);
}
5187
/*
 * Ensure req isn't in use by the timer, then dequeue it.
 *
 * Waits (under nfs_request_mutex) while the timer holds the request busy,
 * then removes it from the global request queue if it is queued.
 */
void
nfs_reqdequeue(struct nfsreq *req)
{
	lck_mtx_lock(nfs_request_mutex);
	while (req->r_lflags & RL_BUSY) {
		/* timer is scanning this request: ask for a wakeup and wait */
		req->r_lflags |= RL_WAITING;
		msleep(&req->r_lflags, nfs_request_mutex, PSOCK, "reqdeq", NULL);
	}
	if (req->r_lflags & RL_QUEUED) {
		TAILQ_REMOVE(&nfs_reqq, req, r_chain);
		req->r_lflags &= ~RL_QUEUED;
	}
	lck_mtx_unlock(nfs_request_mutex);
}
5205
/*
 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
 * free()'d out from under it.
 *
 * Caller must hold nfs_request_mutex (RL_BUSY is protected by it);
 * double-busying indicates a logic error and panics.
 */
void
nfs_reqbusy(struct nfsreq *req)
{
	if (req->r_lflags & RL_BUSY) {
		panic("req locked");
	}
	req->r_lflags |= RL_BUSY;
}
5218
5219 /*
5220 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
5221 */
5222 struct nfsreq *
5223 nfs_reqnext(struct nfsreq *req)
5224 {
5225 struct nfsreq * nextreq;
5226
5227 if (req == NULL) {
5228 return NULL;
5229 }
5230 /*
5231 * We need to get and busy the next req before signalling the
5232 * current one, otherwise wakeup() may block us and we'll race to
5233 * grab the next req.
5234 */
5235 nextreq = TAILQ_NEXT(req, r_chain);
5236 if (nextreq != NULL) {
5237 nfs_reqbusy(nextreq);
5238 }
5239 /* unbusy and signal. */
5240 req->r_lflags &= ~RL_BUSY;
5241 if (req->r_lflags & RL_WAITING) {
5242 req->r_lflags &= ~RL_WAITING;
5243 wakeup(&req->r_lflags);
5244 }
5245 return nextreq;
5246 }
5247
/*
 * NFS request queue timer routine
 *
 * Scan the NFS request queue for any requests that have timed out.
 *
 * Alert the system of unresponsive servers.
 * Mark expired requests on soft mounts as terminated.
 * For UDP, mark/signal requests for retransmission.
 *
 * Locking: iterates nfs_reqq under nfs_request_mutex, busying each request
 * (nfs_reqbusy/nfs_reqnext) so it cannot be freed mid-scan.  Per-request
 * work takes r_mtx, then nm_lock.  Mounts needing a socket poke are
 * collected on a local queue (with a mount reference each) and poked after
 * all locks are dropped.
 */
void
nfs_request_timer(__unused void *param0, __unused void *param1)
{
	struct nfsreq *req;
	struct nfsmount *nmp;
	int timeo, maxtime, finish_asyncio, error;
	struct timeval now;
	TAILQ_HEAD(nfs_mount_pokeq, nfsmount) nfs_mount_poke_queue;
	TAILQ_INIT(&nfs_mount_poke_queue);

restart:
	lck_mtx_lock(nfs_request_mutex);
	req = TAILQ_FIRST(&nfs_reqq);
	if (req == NULL) {      /* no requests - turn timer off */
		nfs_request_timer_on = 0;
		lck_mtx_unlock(nfs_request_mutex);
		return;
	}

	nfs_reqbusy(req);

	microuptime(&now);
	for (; req != NULL; req = nfs_reqnext(req)) {
		nmp = req->r_nmp;
		if (nmp == NULL) {
			NFS_SOCK_DBG("Found a request with out a mount!\n");
			continue;
		}
		if (req->r_error || req->r_nmrep.nmc_mhead) {
			/* already failed or already has a reply — nothing to time out */
			continue;
		}
		if ((error = nfs_sigintr(nmp, req, req->r_thread, 0))) {
			if (req->r_callback.rcb_func != NULL) {
				/* async I/O RPC needs to be finished */
				lck_mtx_lock(&req->r_mtx);
				req->r_error = error;
				finish_asyncio = !(req->r_flags & R_WAITSENT);
				wakeup(req);
				lck_mtx_unlock(&req->r_mtx);
				if (finish_asyncio) {
					nfs_asyncio_finish(req);
				}
			}
			continue;
		}

		lck_mtx_lock(&req->r_mtx);

		/* Periodically log/notify "server not responding" for slow requests. */
		if (nmp->nm_tprintf_initial_delay &&
		    ((req->r_rexmit > 2) || (req->r_flags & R_RESENDERR)) &&
		    ((req->r_lastmsg + nmp->nm_tprintf_delay) < now.tv_sec)) {
			req->r_lastmsg = now.tv_sec;
			nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
			    "not responding", 1);
			req->r_flags |= R_TPRINTFMSG;
			lck_mtx_lock(&nmp->nm_lock);
			if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
				lck_mtx_unlock(&nmp->nm_lock);
				/* we're not yet completely mounted and */
				/* we can't complete an RPC, so we fail */
				OSAddAtomic64(1, &nfsstats.rpctimeouts);
				nfs_softterm(req);
				finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
				wakeup(req);
				lck_mtx_unlock(&req->r_mtx);
				if (finish_asyncio) {
					nfs_asyncio_finish(req);
				}
				continue;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}

		/*
		 * Put a reasonable limit on the maximum timeout,
		 * and reduce that limit when soft mounts get timeouts or are in reconnect.
		 */
		if (!(NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && !nfs_can_squish(nmp)) {
			maxtime = NFS_MAXTIMEO;
		} else if ((req->r_flags & (R_SETUP | R_RECOVER)) ||
		    ((nmp->nm_reconnect_start <= 0) || ((now.tv_sec - nmp->nm_reconnect_start) < 8))) {
			maxtime = (NFS_MAXTIMEO / (nmp->nm_timeouts + 1)) / 2;
		} else {
			maxtime = NFS_MINTIMEO / 4;
		}

		/*
		 * Check for request timeout.
		 */
		if (req->r_rtt >= 0) {
			req->r_rtt++;
			lck_mtx_lock(&nmp->nm_lock);
			if (req->r_flags & R_RESENDERR) {
				/* with resend errors, retry every few seconds */
				timeo = 4 * hz;
			} else {
				if (req->r_procnum == NFSPROC_NULL && req->r_gss_ctx != NULL) {
					timeo = NFS_MINIDEMTIMEO; // gss context setup
				} else if (NMFLAG(nmp, DUMBTIMER)) {
					timeo = nmp->nm_timeo;
				} else {
					timeo = NFS_RTO(nmp, proct[req->r_procnum]);
				}

				/* ensure 62.5 ms floor */
				while (16 * timeo < hz) {
					timeo *= 2;
				}
				if (nmp->nm_timeouts > 0) {
					/* exponential backoff on consecutive mount timeouts */
					timeo *= nfs_backoff[nmp->nm_timeouts - 1];
				}
			}
			/* limit timeout to max */
			if (timeo > maxtime) {
				timeo = maxtime;
			}
			if (req->r_rtt <= timeo) {
				NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req->r_rtt, timeo);
				lck_mtx_unlock(&nmp->nm_lock);
				lck_mtx_unlock(&req->r_mtx);
				continue;
			}
			/* The request has timed out */
			NFS_SOCK_DBG("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
			    req->r_procnum, proct[req->r_procnum],
			    req->r_xid, req->r_rtt, timeo, nmp->nm_timeouts,
			    (now.tv_sec - req->r_start) * NFS_HZ, maxtime);
			if (nmp->nm_timeouts < 8) {
				nmp->nm_timeouts++;
			}
			if (nfs_mount_check_dead_timeout(nmp)) {
				/* Unbusy this request */
				req->r_lflags &= ~RL_BUSY;
				if (req->r_lflags & RL_WAITING) {
					req->r_lflags &= ~RL_WAITING;
					wakeup(&req->r_lflags);
				}
				lck_mtx_unlock(&req->r_mtx);

				/* No need to poke this mount */
				if (nmp->nm_sockflags & NMSOCK_POKE) {
					nmp->nm_sockflags &= ~NMSOCK_POKE;
					TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
				}
				/* Release our lock state, so we can become a zombie */
				lck_mtx_unlock(nfs_request_mutex);

				/*
				 * Note nfs_mount_make zombie(nmp) must be
				 * called with nm_lock held.  After doing some
				 * work we release nm_lock in
				 * nfs_make_mount_zombie with out acquiring any
				 * other locks.  (Later, in nfs_mount_zombie we
				 * will acquire nfs_request_mutex, r_mtx,
				 * nm_lock in that order).  So we should not be
				 * introducing deadlock here.  We take a reference
				 * on the mount so that its still there when we
				 * release the lock.
				 */
				nmp->nm_ref++;
				nfs_mount_make_zombie(nmp);
				lck_mtx_unlock(&nmp->nm_lock);
				nfs_mount_rele(nmp);

				/*
				 * All the request for this mount have now been
				 * removed from the request queue.  Restart to
				 * process the remaining mounts
				 */
				goto restart;
			}

			/* if it's been a few seconds, try poking the socket */
			if ((nmp->nm_sotype == SOCK_STREAM) &&
			    ((now.tv_sec - req->r_start) >= 3) &&
			    !(nmp->nm_sockflags & (NMSOCK_POKE | NMSOCK_UNMOUNT)) &&
			    (nmp->nm_sockflags & NMSOCK_READY)) {
				nmp->nm_sockflags |= NMSOCK_POKE;
				/*
				 * We take a ref on the mount so that we know the mount will still be there
				 * when we process the nfs_mount_poke_queue.  An unmount request will block
				 * in nfs_mount_drain_and_cleanup until after the poke is finished. We release
				 * the reference after calling nfs_sock_poke below;
				 */
				nmp->nm_ref++;
				TAILQ_INSERT_TAIL(&nfs_mount_poke_queue, nmp, nm_pokeq);
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}

		/* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
		if ((NMFLAG(nmp, SOFT) || (req->r_flags & (R_SETUP | R_RECOVER | R_SOFT))) &&
		    ((req->r_rexmit >= req->r_retry) || /* too many */
		    ((now.tv_sec - req->r_start) * NFS_HZ > maxtime))) {      /* too long */
			OSAddAtomic64(1, &nfsstats.rpctimeouts);
			lck_mtx_lock(&nmp->nm_lock);
			if (!(nmp->nm_state & NFSSTA_TIMEO)) {
				lck_mtx_unlock(&nmp->nm_lock);
				/* make sure we note the unresponsive server */
				/* (maxtime may be less than tprintf delay) */
				nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
				    "not responding", 1);
				req->r_lastmsg = now.tv_sec;
				req->r_flags |= R_TPRINTFMSG;
			} else {
				lck_mtx_unlock(&nmp->nm_lock);
			}
			if (req->r_flags & R_NOINTR) {
				/* don't terminate nointr requests on timeout */
				lck_mtx_unlock(&req->r_mtx);
				continue;
			}
			NFS_SOCK_DBG("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt,
			    now.tv_sec - req->r_start);
			nfs_softterm(req);
			finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
			wakeup(req);
			lck_mtx_unlock(&req->r_mtx);
			if (finish_asyncio) {
				nfs_asyncio_finish(req);
			}
			continue;
		}

		/* for TCP, only resend if explicitly requested */
		if ((nmp->nm_sotype == SOCK_STREAM) && !(req->r_flags & R_MUSTRESEND)) {
			if (++req->r_rexmit > NFS_MAXREXMIT) {
				req->r_rexmit = NFS_MAXREXMIT;
			}
			req->r_rtt = 0;
			lck_mtx_unlock(&req->r_mtx);
			continue;
		}

		/*
		 * The request needs to be (re)sent.  Kick the requester to resend it.
		 * (unless it's already marked as needing a resend)
		 */
		if ((req->r_flags & R_MUSTRESEND) && (req->r_rtt == -1)) {
			lck_mtx_unlock(&req->r_mtx);
			continue;
		}
		NFS_SOCK_DBG("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
		    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = -1;
		wakeup(req);
		if ((req->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
			/* pure async request with no waiter: hand it to the async I/O threads */
			nfs_asyncio_resend(req);
		}
		lck_mtx_unlock(&req->r_mtx);
	}

	lck_mtx_unlock(nfs_request_mutex);

	/* poke any sockets */
	while ((nmp = TAILQ_FIRST(&nfs_mount_poke_queue))) {
		TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
		nfs_sock_poke(nmp);
		nfs_mount_rele(nmp);
	}

	nfs_interval_timer_start(nfs_request_timer_call, NFS_REQUESTDELAY);
}
5522
5523 /*
5524 * check a thread's proc for the "noremotehang" flag.
5525 */
5526 int
5527 nfs_noremotehang(thread_t thd)
5528 {
5529 proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
5530 return p && proc_noremotehang(p);
5531 }
5532
/*
 * Test for a termination condition pending on the process.
 * This is used to determine if we need to bail on a mount.
 * ETIMEDOUT is returned if there has been a soft timeout.
 * EINTR is returned if there is a signal pending that is not being ignored
 * and the mount is interruptable, or if we are a thread that is in the process
 * of cancellation (also SIGKILL posted).
 *
 * ENXIO is returned for a missing/dead mount, EIO for force-unmount or a
 * hung mount with "noremotehang" set; 0 means keep going.
 * If 'nmplocked' is set the caller already holds nm_lock.
 */
extern int sigprop[NSIG + 1];
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocked)
{
	proc_t p;
	int error = 0;

	if (!nmp) {
		return ENXIO;
	}

	if (req && (req->r_flags & R_SOFTTERM)) {
		return ETIMEDOUT;  /* request has been terminated. */
	}
	if (req && (req->r_flags & R_NOINTR)) {
		thd = NULL;  /* don't check for signal on R_NOINTR */
	}
	if (!nmplocked) {
		lck_mtx_lock(&nmp->nm_lock);
	}
	if (nmp->nm_state & NFSSTA_FORCE) {
		/* If a force unmount is in progress then fail. */
		error = EIO;
	} else if (vfs_isforce(nmp->nm_mountp)) {
		/* Someone is unmounting us, go soft and mark it. */
		NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_SOFT);
		nmp->nm_state |= NFSSTA_FORCE;
	}

	/* Check if the mount is marked dead. */
	if (!error && (nmp->nm_state & NFSSTA_DEAD)) {
		error = ENXIO;
	}

	/*
	 * If the mount is hung and we've requested not to hang
	 * on remote filesystems, then bail now.
	 */
	if (current_proc() != kernproc &&
	    !error && (nmp->nm_state & NFSSTA_TIMEO) && nfs_noremotehang(thd)) {
		error = EIO;
	}

	if (!nmplocked) {
		lck_mtx_unlock(&nmp->nm_lock);
	}
	if (error) {
		return error;
	}

	/* may not have a thread for async I/O */
	if (thd == NULL || current_proc() == kernproc) {
		return 0;
	}

	/*
	 * Check if the process is aborted, but don't interrupt if we
	 * were killed by a signal and this is the exiting thread which
	 * is attempting to dump core.
	 */
	if (((p = current_proc()) != kernproc) && current_thread_aborted() &&
	    (!(p->p_acflag & AXSIG) || (p->exit_thread != current_thread()) ||
	    (p->p_sigacts == NULL) ||
	    (p->p_sigacts->ps_sig < 1) || (p->p_sigacts->ps_sig > NSIG) ||
	    !(sigprop[p->p_sigacts->ps_sig] & SA_CORE))) {
		return EINTR;
	}

	/* mask off thread and process blocked signals. */
	if (NMFLAG(nmp, INTR) && ((p = get_bsdthreadtask_info(thd))) &&
	    proc_pendingsignals(p, NFSINT_SIGMASK)) {
		return EINTR;
	}
	return 0;
}
5616
/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 *
 * Returns 0 with NFSSTA_SNDLOCK held, ENXIO if the mount is gone, or an
 * interruption error from nfs_sigintr().
 */
int
nfs_sndlock(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	int *statep;
	int error = 0, slpflag = 0;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	lck_mtx_lock(&nmp->nm_lock);
	statep = &nmp->nm_state;

	/* interruptible sleep only for INTR mounts with a thread and no R_NOINTR */
	if (NMFLAG(nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
		slpflag = PCATCH;
	}
	while (*statep & NFSSTA_SNDLOCK) {
		if ((error = nfs_sigintr(nmp, req, req->r_thread, 1))) {
			break;
		}
		*statep |= NFSSTA_WANTSND;
		if (nfs_noremotehang(req->r_thread)) {
			/* noremotehang: bound each sleep to 1 second */
			ts.tv_sec = 1;
		}
		msleep(statep, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsndlck", &ts);
		if (slpflag == PCATCH) {
			/* after one interruptible sleep, fall back to bounded uninterruptible sleeps */
			slpflag = 0;
			ts.tv_sec = 2;
		}
	}
	if (!error) {
		*statep |= NFSSTA_SNDLOCK;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	return error;
}
5661
5662 /*
5663 * Unlock the stream socket for others.
5664 */
5665 void
5666 nfs_sndunlock(struct nfsreq *req)
5667 {
5668 struct nfsmount *nmp = req->r_nmp;
5669 int *statep, wake = 0;
5670
5671 if (!nmp) {
5672 return;
5673 }
5674 lck_mtx_lock(&nmp->nm_lock);
5675 statep = &nmp->nm_state;
5676 if ((*statep & NFSSTA_SNDLOCK) == 0) {
5677 panic("nfs sndunlock");
5678 }
5679 *statep &= ~(NFSSTA_SNDLOCK | NFSSTA_SENDING);
5680 if (*statep & NFSSTA_WANTSND) {
5681 *statep &= ~NFSSTA_WANTSND;
5682 wake = 1;
5683 }
5684 lck_mtx_unlock(&nmp->nm_lock);
5685 if (wake) {
5686 wakeup(statep);
5687 }
5688 }
5689
/*
 * Send an auxiliary RPC (e.g. portmap/rpcbind, mount) and wait for the reply.
 *
 * If 'so' is NULL a temporary socket of type 'sotype' is created (optionally
 * bound to a reserved/low port when 'bindresv' is set) and torn down before
 * returning; otherwise the caller's socket is used with its send/receive
 * timeouts temporarily set to 1 second.  'mreq' is the prebuilt RPC request
 * (consumed here — freed on all paths); 'xid' is matched against the reply.
 * The request is retried/polled for up to 'timeo' seconds (UDP resends the
 * request at increasing intervals; TCP sends once).  On success the reply
 * body (positioned after the RPC accept verifier) is returned in *nmrep.
 */
int
nfs_aux_request(
	struct nfsmount *nmp,
	thread_t thd,
	struct sockaddr *saddr,
	socket_t so,
	int sotype,
	mbuf_t mreq,
	uint32_t xid,
	int bindresv,
	int timeo,
	struct nfsm_chain *nmrep)
{
	int error = 0, on = 1, try, sendat = 2, soproto, recv, optlen, restoreto = 0;
	socket_t newso = NULL;
	struct sockaddr_storage ss;
	struct timeval orig_rcvto, orig_sndto, tv = { .tv_sec = 1, .tv_usec = 0 };
	mbuf_t m, mrep = NULL;
	struct msghdr msg;
	uint32_t rxid = 0, reply = 0, reply_status, rejected_status;
	uint32_t verf_type, verf_len, accepted_status;
	size_t readlen, sentlen;
	struct nfs_rpc_record_state nrrs;

	if (!so) {
		/* create socket and set options */
		if (saddr->sa_family == AF_LOCAL) {
			soproto = 0;
		} else {
			soproto = (sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP;
		}
		if ((error = sock_socket(saddr->sa_family, sotype, soproto, NULL, NULL, &newso))) {
			goto nfsmout;
		}

		if (bindresv && saddr->sa_family != AF_LOCAL) {
			/* bind to an ephemeral port in the low (reserved) range */
			int level = (saddr->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
			int optname = (saddr->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
			int portrange = IP_PORTRANGE_LOW;
			error = sock_setsockopt(newso, level, optname, &portrange, sizeof(portrange));
			nfsmout_if(error);
			ss.ss_len = saddr->sa_len;
			ss.ss_family = saddr->sa_family;
			if (ss.ss_family == AF_INET) {
				((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
				((struct sockaddr_in*)&ss)->sin_port = htons(0);
			} else if (ss.ss_family == AF_INET6) {
				((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
				((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
			} else {
				error = EINVAL;
			}
			if (!error) {
				error = sock_bind(newso, (struct sockaddr *)&ss);
			}
			nfsmout_if(error);
		}

		if (sotype == SOCK_STREAM) {
#define NFS_AUX_CONNECTION_TIMEOUT 4   /* 4 second timeout for connections */
			int count = 0;

			/* non-blocking connect, then poll for completion */
			error = sock_connect(newso, saddr, MSG_DONTWAIT);
			if (error == EINPROGRESS) {
				error = 0;
			}
			nfsmout_if(error);

			while ((error = sock_connectwait(newso, &tv)) == EINPROGRESS) {
				/* After NFS_AUX_CONNECTION_TIMEOUT bail */
				if (++count >= NFS_AUX_CONNECTION_TIMEOUT) {
					error = ETIMEDOUT;
					break;
				}
			}
			nfsmout_if(error);
		}
		if (((error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))) ||
		    ((error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)))) ||
		    ((error = sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on))))) {
			goto nfsmout;
		}
		so = newso;
	} else {
		/* make sure socket is using a one second timeout in this function */
		optlen = sizeof(orig_rcvto);
		error = sock_getsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, &optlen);
		if (!error) {
			optlen = sizeof(orig_sndto);
			error = sock_getsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, &optlen);
		}
		if (!error) {
			sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
			sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
			restoreto = 1;  /* original timeouts restored at nfsmout */
		}
	}

	if (sotype == SOCK_STREAM) {
		sendat = 0; /* we only resend the request for UDP */
		nfs_rpc_record_state_init(&nrrs);
	}

	for (try = 0; try < timeo; try++) {
		/* first pass is uninterruptible (thd NULL) so we always send at least once */
		if ((error = nfs_sigintr(nmp, NULL, !try ? NULL : thd, 0))) {
			break;
		}
		if (!try || (try == sendat)) {
			/* send the request (resending periodically for UDP) */
			if ((error = mbuf_copym(mreq, 0, MBUF_COPYALL, MBUF_WAITOK, &m))) {
				goto nfsmout;
			}
			bzero(&msg, sizeof(msg));
			if ((sotype == SOCK_DGRAM) && !sock_isconnected(so)) {
				msg.msg_name = saddr;
				msg.msg_namelen = saddr->sa_len;
			}
			if ((error = sock_sendmbuf(so, &msg, m, 0, &sentlen))) {
				goto nfsmout;
			}
			/* back off the UDP resend interval, capped at 30 seconds */
			sendat *= 2;
			if (sendat > 30) {
				sendat = 30;
			}
		}
		/* wait for the response */
		if (sotype == SOCK_STREAM) {
			/* try to read (more of) record */
			error = nfs_rpc_record_read(so, &nrrs, 0, &recv, &mrep);
			/* if we don't have the whole record yet, we'll keep trying */
		} else {
			readlen = 1 << 18;
			bzero(&msg, sizeof(msg));
			error = sock_receivembuf(so, &msg, &mrep, 0, &readlen);
		}
		if (error == EWOULDBLOCK) {
			continue;
		}
		nfsmout_if(error);
		/* parse the RPC reply header */
		nfsm_chain_dissect_init(error, nmrep, mrep);
		nfsm_chain_get_32(error, nmrep, rxid);
		nfsm_chain_get_32(error, nmrep, reply);
		nfsmout_if(error);
		if ((rxid != xid) || (reply != RPC_REPLY)) {
			error = EBADRPC;
		}
		nfsm_chain_get_32(error, nmrep, reply_status);
		nfsmout_if(error);
		if (reply_status == RPC_MSGDENIED) {
			nfsm_chain_get_32(error, nmrep, rejected_status);
			nfsmout_if(error);
			error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
			goto nfsmout;
		}
		nfsm_chain_get_32(error, nmrep, verf_type); /* verifier flavor */
		nfsm_chain_get_32(error, nmrep, verf_len); /* verifier length */
		nfsmout_if(error);
		if (verf_len) {
			nfsm_chain_adv(error, nmrep, nfsm_rndup(verf_len));
		}
		nfsm_chain_get_32(error, nmrep, accepted_status);
		nfsmout_if(error);
		/* map RPC accept status to an errno */
		switch (accepted_status) {
		case RPC_SUCCESS:
			error = 0;
			break;
		case RPC_PROGUNAVAIL:
			error = EPROGUNAVAIL;
			break;
		case RPC_PROGMISMATCH:
			error = EPROGMISMATCH;
			break;
		case RPC_PROCUNAVAIL:
			error = EPROCUNAVAIL;
			break;
		case RPC_GARBAGE:
			error = EBADRPC;
			break;
		case RPC_SYSTEM_ERR:
		default:
			error = EIO;
			break;
		}
		break;
	}
nfsmout:
	if (restoreto) {
		/* NOTE(review): sizeof(tv) is used here but equals sizeof(orig_rcvto)/sizeof(orig_sndto) (all struct timeval) */
		sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, sizeof(tv));
		sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, sizeof(tv));
	}
	if (newso) {
		sock_shutdown(newso, SHUT_RDWR);
		sock_close(newso);
	}
	/* this function consumes the caller's request mbuf */
	mbuf_freem(mreq);
	return error;
}
5888
5889 int
5890 nfs_portmap_lookup(
5891 struct nfsmount *nmp,
5892 vfs_context_t ctx,
5893 struct sockaddr *sa,
5894 socket_t so,
5895 uint32_t protocol,
5896 uint32_t vers,
5897 uint32_t stype,
5898 int timeo)
5899 {
5900 thread_t thd = vfs_context_thread(ctx);
5901 kauth_cred_t cred = vfs_context_ucred(ctx);
5902 struct sockaddr_storage ss;
5903 struct sockaddr *saddr = (struct sockaddr*)&ss;
5904 static struct sockaddr_un rpcbind_cots = {
5905 sizeof(struct sockaddr_un),
5906 AF_LOCAL,
5907 RPCB_TICOTSORD_PATH
5908 };
5909 static struct sockaddr_un rpcbind_clts = {
5910 sizeof(struct sockaddr_un),
5911 AF_LOCAL,
5912 RPCB_TICLTS_PATH
5913 };
5914 struct nfsm_chain nmreq, nmrep;
5915 mbuf_t mreq;
5916 int error = 0, ip, pmprog, pmvers, pmproc;
5917 uint32_t ualen = 0, scopeid = 0, port32;
5918 uint64_t xid = 0;
5919 char uaddr[MAX_IPv6_STR_LEN + 16];
5920
5921 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
5922 if (saddr->sa_family == AF_INET) {
5923 ip = 4;
5924 pmprog = PMAPPROG;
5925 pmvers = PMAPVERS;
5926 pmproc = PMAPPROC_GETPORT;
5927 } else if (saddr->sa_family == AF_INET6) {
5928 ip = 6;
5929 pmprog = RPCBPROG;
5930 pmvers = RPCBVERS4;
5931 pmproc = RPCBPROC_GETVERSADDR;
5932 } else if (saddr->sa_family == AF_LOCAL) {
5933 ip = 0;
5934 pmprog = RPCBPROG;
5935 pmvers = RPCBVERS4;
5936 pmproc = RPCBPROC_GETVERSADDR;
5937 NFS_SOCK_DBG("%s\n", ((struct sockaddr_un*)sa)->sun_path);
5938 saddr = (struct sockaddr*)((stype == SOCK_STREAM) ? &rpcbind_cots : &rpcbind_clts);
5939 } else {
5940 return EINVAL;
5941 }
5942 nfsm_chain_null(&nmreq);
5943 nfsm_chain_null(&nmrep);
5944
5945 tryagain:
5946 /* send portmapper request to get port/uaddr */
5947 if (ip == 4) {
5948 ((struct sockaddr_in*)saddr)->sin_port = htons(PMAPPORT);
5949 } else if (ip == 6) {
5950 ((struct sockaddr_in6*)saddr)->sin6_port = htons(PMAPPORT);
5951 }
5952 nfsm_chain_build_alloc_init(error, &nmreq, 8 * NFSX_UNSIGNED);
5953 nfsm_chain_add_32(error, &nmreq, protocol);
5954 nfsm_chain_add_32(error, &nmreq, vers);
5955 if (ip == 4) {
5956 nfsm_chain_add_32(error, &nmreq, stype == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP);
5957 nfsm_chain_add_32(error, &nmreq, 0);
5958 } else {
5959 if (stype == SOCK_STREAM) {
5960 if (ip == 6) {
5961 nfsm_chain_add_string(error, &nmreq, "tcp6", 4);
5962 } else {
5963 nfsm_chain_add_string(error, &nmreq, "ticotsord", 9);
5964 }
5965 } else {
5966 if (ip == 6) {
5967 nfsm_chain_add_string(error, &nmreq, "udp6", 4);
5968 } else {
5969 nfsm_chain_add_string(error, &nmreq, "ticlts", 6);
5970 }
5971 }
5972 nfsm_chain_add_string(error, &nmreq, "", 0); /* uaddr */
5973 nfsm_chain_add_string(error, &nmreq, "", 0); /* owner */
5974 }
5975 nfsm_chain_build_done(error, &nmreq);
5976 nfsmout_if(error);
5977 error = nfsm_rpchead2(nmp, stype, pmprog, pmvers, pmproc,
5978 RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
5979 nfsmout_if(error);
5980 nmreq.nmc_mhead = NULL;
5981
5982 NFS_SOCK_DUMP_MBUF("nfs_portmap_loockup request", mreq);
5983 error = nfs_aux_request(nmp, thd, saddr, so,
5984 stype, mreq, R_XID32(xid), 0, timeo, &nmrep);
5985 NFS_SOCK_DUMP_MBUF("nfs_portmap_lookup reply", nmrep.nmc_mhead);
5986 NFS_SOCK_DBG("rpcbind request returned %d for program %u vers %u: %s\n", error, protocol, vers,
5987 (saddr->sa_family == AF_LOCAL) ? ((struct sockaddr_un *)saddr)->sun_path :
5988 (saddr->sa_family == AF_INET6) ? "INET6 socket" : "INET socket");
5989
5990 /* grab port from portmap response */
5991 if (ip == 4) {
5992 nfsm_chain_get_32(error, &nmrep, port32);
5993 if (!error) {
5994 if (NFS_PORT_INVALID(port32)) {
5995 error = EBADRPC;
5996 } else {
5997 ((struct sockaddr_in*)sa)->sin_port = htons((in_port_t)port32);
5998 }
5999 }
6000 } else {
6001 /* get uaddr string and convert to sockaddr */
6002 nfsm_chain_get_32(error, &nmrep, ualen);
6003 if (!error) {
6004 if (ualen > (sizeof(uaddr) - 1)) {
6005 error = EIO;
6006 }
6007 if (ualen < 1) {
6008 /* program is not available, just return a zero port */
6009 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
6010 if (ip == 6) {
6011 ((struct sockaddr_in6*)saddr)->sin6_port = htons(0);
6012 } else {
6013 ((struct sockaddr_un*)saddr)->sun_path[0] = '\0';
6014 }
6015 NFS_SOCK_DBG("Program %u version %u unavailable", protocol, vers);
6016 } else {
6017 nfsm_chain_get_opaque(error, &nmrep, ualen, uaddr);
6018 NFS_SOCK_DBG("Got uaddr %s\n", uaddr);
6019 if (!error) {
6020 uaddr[ualen] = '\0';
6021 if (ip == 6) {
6022 scopeid = ((struct sockaddr_in6*)saddr)->sin6_scope_id;
6023 }
6024 if (!nfs_uaddr2sockaddr(uaddr, saddr)) {
6025 error = EIO;
6026 }
6027 if (ip == 6 && scopeid != ((struct sockaddr_in6*)saddr)->sin6_scope_id) {
6028 NFS_SOCK_DBG("Setting scope_id from %u to %u\n", ((struct sockaddr_in6*)saddr)->sin6_scope_id, scopeid);
6029 ((struct sockaddr_in6*)saddr)->sin6_scope_id = scopeid;
6030 }
6031 }
6032 }
6033 }
6034 if ((error == EPROGMISMATCH) || (error == EPROCUNAVAIL) || (error == EIO) || (error == EBADRPC)) {
6035 /* remote doesn't support rpcbind version or proc (or we couldn't parse uaddr) */
6036 if (pmvers == RPCBVERS4) {
6037 /* fall back to v3 and GETADDR */
6038 pmvers = RPCBVERS3;
6039 pmproc = RPCBPROC_GETADDR;
6040 nfsm_chain_cleanup(&nmreq);
6041 nfsm_chain_cleanup(&nmrep);
6042 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
6043 xid = 0;
6044 error = 0;
6045 goto tryagain;
6046 }
6047 }
6048 if (!error) {
6049 bcopy(saddr, sa, min(saddr->sa_len, sa->sa_len));
6050 }
6051 }
6052 nfsmout:
6053 nfsm_chain_cleanup(&nmreq);
6054 nfsm_chain_cleanup(&nmrep);
6055 NFS_SOCK_DBG("Returned %d\n", error);
6056
6057 return error;
6058 }
6059
6060 int
6061 nfs_msg(thread_t thd,
6062 const char *server,
6063 const char *msg,
6064 int error)
6065 {
6066 proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
6067 tpr_t tpr;
6068
6069 if (p) {
6070 tpr = tprintf_open(p);
6071 } else {
6072 tpr = NULL;
6073 }
6074 if (error) {
6075 tprintf(tpr, "nfs server %s: %s, error %d\n", server, msg, error);
6076 } else {
6077 tprintf(tpr, "nfs server %s: %s\n", server, msg);
6078 }
6079 tprintf_close(tpr);
6080 return 0;
6081 }
6082
#define NFS_SQUISH_MOBILE_ONLY          0x0001  /* Squish mounts only on mobile machines */
#define NFS_SQUISH_AUTOMOUNTED_ONLY     0x0002  /* Squish mounts only if they are automounted */
#define NFS_SQUISH_SOFT                 0x0004  /* Treat all soft mounts as though they were on a mobile machine */
#define NFS_SQUISH_QUICK                0x0008  /* Try to squish mounts more quickly. */
#define NFS_SQUISH_SHUTDOWN             0x1000  /* Squish all mounts on shutdown. Currently not implemented */

/* Default policy: squish only automounted volumes on mobile machines, and do it quickly. */
uint32_t nfs_squishy_flags = NFS_SQUISH_MOBILE_ONLY | NFS_SQUISH_AUTOMOUNTED_ONLY | NFS_SQUISH_QUICK;
uint32_t nfs_tcp_sockbuf = 128 * 1024;  /* Default value of tcp_sendspace and tcp_recvspace */
int32_t nfs_is_mobile;                  /* nonzero when this client is a mobile machine (presumably set via sysctl — see comment below) */

#define NFS_SQUISHY_DEADTIMEOUT         8       /* Dead time out for squishy mounts */
#define NFS_SQUISHY_QUICKTIMEOUT        4       /* Quicker dead time out when nfs_squishy_flags NFS_SQUISH_QUICK bit is set */
6095
6096 /*
6097 * Could this mount be squished?
6098 */
6099 int
6100 nfs_can_squish(struct nfsmount *nmp)
6101 {
6102 uint64_t flags = vfs_flags(nmp->nm_mountp);
6103 int softsquish = ((nfs_squishy_flags & NFS_SQUISH_SOFT) & NMFLAG(nmp, SOFT));
6104
6105 if (!softsquish && (nfs_squishy_flags & NFS_SQUISH_MOBILE_ONLY) && nfs_is_mobile == 0) {
6106 return 0;
6107 }
6108
6109 if ((nfs_squishy_flags & NFS_SQUISH_AUTOMOUNTED_ONLY) && (flags & MNT_AUTOMOUNTED) == 0) {
6110 return 0;
6111 }
6112
6113 return 1;
6114 }
6115
6116 /*
6117 * NFS mounts default to "rw,hard" - but frequently on mobile clients
6118 * the mount may become "not responding". It's desirable to be able
6119 * to unmount these dead mounts, but only if there is no risk of
6120 * losing data or crashing applications. A "squishy" NFS mount is one
6121 * that can be force unmounted with little risk of harm.
6122 *
6123 * nfs_is_squishy checks if a mount is in a squishy state. A mount is
6124 * in a squishy state iff it is allowed to be squishy and there are no
6125 * dirty pages and there are no mmapped files and there are no files
 * open for write. Whether mounts are allowed to be squishy is controlled by
 * the settings of nfs_squishy_flags and the machine's mobility state. These
6128 * flags can be set by sysctls.
6129 *
6130 * If nfs_is_squishy determines that we are in a squishy state we will
6131 * update the current dead timeout to at least NFS_SQUISHY_DEADTIMEOUT
6132 * (or NFS_SQUISHY_QUICKTIMEOUT if NFS_SQUISH_QUICK is set) (see
6133 * above) or 1/8th of the mount's nm_deadtimeout value, otherwise we just
6134 * update the current dead timeout with the mount's nm_deadtimeout
6135 * value set at mount time.
6136 *
6137 * Assumes that nm_lock is held.
6138 *
6139 * Note this routine is racey, but its effects on setting the
6140 * dead timeout only have effects when we're in trouble and are likely
6141 * to stay that way. Since by default its only for automounted
6142 * volumes on mobile machines; this is a reasonable trade off between
6143 * data integrity and user experience. It can be disabled or set via
6144 * nfs.conf file.
6145 */
6146
int
nfs_is_squishy(struct nfsmount *nmp)
{
	mount_t mp = nmp->nm_mountp;
	int squishy = 0;
	/* base dead timeout: shorter when the NFS_SQUISH_QUICK policy is configured */
	int timeo = (nfs_squishy_flags & NFS_SQUISH_QUICK) ? NFS_SQUISHY_QUICKTIMEOUT : NFS_SQUISHY_DEADTIMEOUT;

	NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n",
	    vfs_statfs(mp)->f_mntfromname, nmp->nm_curdeadtimeout, nfs_is_mobile);

	if (!nfs_can_squish(nmp)) {
		goto out;
	}

	/* use at least 1/8th of the mount's own dead timeout when that is larger */
	timeo = (nmp->nm_deadtimeout > timeo) ? max(nmp->nm_deadtimeout / 8, timeo) : timeo;
	NFS_SOCK_DBG("nm_writers = %d nm_mappers = %d timeo = %d\n", nmp->nm_writers, nmp->nm_mappers, timeo);

	/* squishy only when nothing is open for write and nothing is mmapped */
	if (nmp->nm_writers == 0 && nmp->nm_mappers == 0) {
		uint64_t flags = mp ? vfs_flags(mp) : 0;
		squishy = 1;

		/*
		 * Walk the nfs nodes and check for dirty buffers if we're not
		 * RDONLY and we've not already been declared as squishy since
		 * this can be a bit expensive.
		 */
		if (!(flags & MNT_RDONLY) && !(nmp->nm_state & NFSSTA_SQUISHY)) {
			squishy = !nfs_mount_is_dirty(mp);
		}
	}

out:
	if (squishy) {
		nmp->nm_state |= NFSSTA_SQUISHY;
	} else {
		nmp->nm_state &= ~NFSSTA_SQUISHY;
	}

	/* squishy mounts get the (possibly shortened) timeout computed above */
	nmp->nm_curdeadtimeout = squishy ? timeo : nmp->nm_deadtimeout;

	NFS_SOCK_DBG("nm_curdeadtimeout = %d\n", nmp->nm_curdeadtimeout);

	return squishy;
}
6191
6192 /*
6193 * On a send operation, if we can't reach the server and we've got only one server to talk to
6194 * and NFS_SQUISH_QUICK flag is set and we are in a squishy state then mark the mount as dead
6195 * and ask to be forcibly unmounted. Return 1 if we're dead and 0 otherwise.
6196 */
int
nfs_is_dead(int error, struct nfsmount *nmp)
{
	fsid_t fsid;

	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_state & NFSSTA_DEAD) {
		/* already declared dead */
		lck_mtx_unlock(&nmp->nm_lock);
		return 1;
	}

	/* only act on "server unreachable" errors, and only for single-server, single-location mounts */
	if ((error != ENETUNREACH && error != EHOSTUNREACH && error != EADDRNOTAVAIL) ||
	    !(nmp->nm_locations.nl_numlocs == 1 && nmp->nm_locations.nl_locations[0]->nl_servcount == 1)) {
		lck_mtx_unlock(&nmp->nm_lock);
		return 0;
	}

	if ((nfs_squishy_flags & NFS_SQUISH_QUICK) && nfs_is_squishy(nmp)) {
		printf("nfs_is_dead: nfs server %s: unreachable. Squished dead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
		/* snapshot the fsid before dropping the lock so the signal uses a stable value */
		fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
		lck_mtx_unlock(&nmp->nm_lock);
		nfs_mount_zombie(nmp, NFSSTA_DEAD);
		vfs_event_signal(&fsid, VQ_DEAD, 0);
		return 1;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	return 0;
}
6225
6226 /*
6227 * If we've experienced timeouts and we're not really a
6228 * classic hard mount, then just return cached data to
6229 * the caller instead of likely hanging on an RPC.
6230 */
6231 int
6232 nfs_use_cache(struct nfsmount *nmp)
6233 {
6234 /*
6235 *%%% We always let mobile users goto the cache,
6236 * perhaps we should not even require them to have
6237 * a timeout?
6238 */
6239 int cache_ok = (nfs_is_mobile || NMFLAG(nmp, SOFT) ||
6240 nfs_can_squish(nmp) || nmp->nm_deadtimeout);
6241
6242 int timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
6243
6244 /*
6245 * So if we have a timeout and we're not really a hard hard-mount,
6246 * return 1 to not get things out of the cache.
6247 */
6248
6249 return (nmp->nm_state & timeoutmask) && cache_ok;
6250 }
6251
6252 /*
6253 * Log a message that nfs or lockd server is unresponsive. Check if we
6254 * can be squished and if we can, or that our dead timeout has
6255 * expired, and we're not holding state, set our mount as dead, remove
6256 * our mount state and ask to be unmounted. If we are holding state
6257 * we're being called from the nfs_request_timer and will soon detect
6258 * that we need to unmount.
6259 */
void
nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *msg, int holding_state)
{
	int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
	uint32_t do_vfs_signal = 0;
	struct timeval now;

	if (nfs_mount_gone(nmp)) {
		return;
	}

	lck_mtx_lock(&nmp->nm_lock);

	timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
	if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
		timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
	}
	/* remember whether we were already unresponsive before this report */
	wasunresponsive = (nmp->nm_state & timeoutmask);

	/* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
	softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));

	/* record the newly reported timeout condition(s) */
	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state |= NFSSTA_TIMEO;
	}
	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state |= NFSSTA_LOCKTIMEO;
	}
	if ((flags & NFSSTA_JUKEBOXTIMEO) && !(nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
		nmp->nm_state |= NFSSTA_JUKEBOXTIMEO;
	}

	unresponsive = (nmp->nm_state & timeoutmask);

	/* refresh squishy state, which may shorten nm_curdeadtimeout */
	nfs_is_squishy(nmp);

	if (unresponsive && (nmp->nm_curdeadtimeout > 0)) {
		microuptime(&now);
		if (!wasunresponsive) {
			/* just became unresponsive: start the dead timer */
			nmp->nm_deadto_start = now.tv_sec;
			nfs_mount_sock_thread_wake(nmp);
		} else if ((now.tv_sec - nmp->nm_deadto_start) > nmp->nm_curdeadtimeout && !holding_state) {
			if (!(nmp->nm_state & NFSSTA_DEAD)) {
				printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
				    (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
			}
			do_vfs_signal = VQ_DEAD;
		}
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* NOTE(review): nm_state is re-read here without nm_lock held — looks benign, but confirm */
	if (do_vfs_signal == VQ_DEAD && !(nmp->nm_state & NFSSTA_DEAD)) {
		nfs_mount_zombie(nmp, NFSSTA_DEAD);
	} else if (softnobrowse || wasunresponsive || !unresponsive) {
		/* suppress notification: hidden soft mount, or no new responsiveness transition */
		do_vfs_signal = 0;
	} else {
		do_vfs_signal = VQ_NOTRESP;
	}
	if (do_vfs_signal) {
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, do_vfs_signal, 0);
	}

	nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, error);
}
6324
/*
 * The server is responding again.  Log the optional message, clear the
 * timeout condition(s) given in 'flags', reset the dead-timer and
 * squishy accounting, and signal VQ_NOTRESP-cleared if this removed
 * the mount's last timeout condition.
 */
void
nfs_up(struct nfsmount *nmp, thread_t thd, int flags, const char *msg)
{
	int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
	int do_vfs_signal;

	if (nfs_mount_gone(nmp)) {
		return;
	}

	if (msg) {
		nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, 0);
	}

	lck_mtx_lock(&nmp->nm_lock);

	timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
	if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
		timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
	}
	/* remember whether we were unresponsive before clearing anything */
	wasunresponsive = (nmp->nm_state & timeoutmask);

	/* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
	softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));

	/* clear the timeout condition(s) being reported as recovered */
	if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state &= ~NFSSTA_TIMEO;
	}
	if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
	}
	if ((flags & NFSSTA_JUKEBOXTIMEO) && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
		nmp->nm_state &= ~NFSSTA_JUKEBOXTIMEO;
	}

	unresponsive = (nmp->nm_state & timeoutmask);

	/* reset dead-timer and squishy state now that the server responded */
	nmp->nm_deadto_start = 0;
	nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
	nmp->nm_state &= ~NFSSTA_SQUISHY;
	lck_mtx_unlock(&nmp->nm_lock);

	if (softnobrowse) {
		do_vfs_signal = 0;
	} else {
		/* only signal when this cleared the last outstanding timeout condition */
		do_vfs_signal = (wasunresponsive && !unresponsive);
	}
	if (do_vfs_signal) {
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 1);
	}
}
6376
6377
6378 #endif /* CONFIG_NFS_CLIENT */
6379
6380 #if CONFIG_NFS_SERVER
6381
6382 /*
6383 * Generate the rpc reply header
6384 * siz arg. is used to decide if adding a cluster is worthwhile
6385 */
int
nfsrv_rephead(
	struct nfsrv_descript *nd,
	__unused struct nfsrv_sock *slp,
	struct nfsm_chain *nmrepp,
	size_t siz)
{
	mbuf_t mrep;
	u_int32_t *tl;
	struct nfsm_chain nmrep;
	int err, error;

	err = nd->nd_repstat;
	if (err && (nd->nd_vers == NFS_VER2)) {
		/* NFSv2 error replies carry no body beyond the status */
		siz = 0;
	}

	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= nfs_mbuf_minclsize) {
		error = mbuf_getpacket(MBUF_WAITOK, &mrep);
	} else {
		error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mrep);
	}
	if (error) {
		/* unable to allocate packet */
		/* XXX should we keep statistics for these errors? */
		return error;
	}
	if (siz < nfs_mbuf_minclsize) {
		/* leave space for lower level headers */
		tl = mbuf_data(mrep);
		tl += 80 / sizeof(*tl); /* XXX max_hdr? XXX */
		mbuf_setdata(mrep, tl, 6 * NFSX_UNSIGNED);
	}
	nfsm_chain_init(&nmrep, mrep);
	nfsm_chain_add_32(error, &nmrep, nd->nd_retxid);
	nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		/* rejected reply: auth failure or RPC version mismatch */
		nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
		if (err & NFSERR_AUTHERR) {
			nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
			nfsm_chain_add_32(error, &nmrep, (err & ~NFSERR_AUTHERR));
		} else {
			nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
			/* low and high supported RPC versions (both RPC_VER2) */
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
		}
	} else {
		/* reply status */
		nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
		if (nd->nd_gss_context != NULL) {
			/* RPCSEC_GSS verifier */
			error = nfs_gss_svc_verf_put(nd, &nmrep);
			if (error) {
				nfsm_chain_add_32(error, &nmrep, RPC_SYSTEM_ERR);
				goto done;
			}
		} else {
			/* RPCAUTH_NULL verifier */
			nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
			nfsm_chain_add_32(error, &nmrep, 0);
		}
		/* accepted status */
		switch (err) {
		case EPROGUNAVAIL:
			nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
			/* XXX hard coded versions? */
			nfsm_chain_add_32(error, &nmrep, NFS_VER2);
			nfsm_chain_add_32(error, &nmrep, NFS_VER3);
			break;
		case EPROCUNAVAIL:
			nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
			break;
		default:
			nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
			if (nd->nd_gss_context != NULL) {
				error = nfs_gss_svc_prepare_reply(nd, &nmrep);
			}
			if (err != NFSERR_RETVOID) {
				/* map the errno to the protocol-level NFS error code */
				nfsm_chain_add_32(error, &nmrep,
				    (err ? nfsrv_errmap(nd, err) : 0));
			}
			break;
		}
	}

done:
	nfsm_chain_build_done(error, &nmrep);
	if (error) {
		/* error composing reply header */
		/* XXX should we keep statistics for these errors? */
		mbuf_freem(mrep);
		return error;
	}

	*nmrepp = nmrep;
	if ((err != 0) && (err != NFSERR_RETVOID)) {
		OSAddAtomic64(1, &nfsstats.srvrpc_errs);
	}
	return 0;
}
6497
6498 /*
6499 * The nfs server send routine.
6500 *
6501 * - return EINTR or ERESTART if interrupted by a signal
6502 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
6503 * - do any cleanup required by recoverable socket errors (???)
6504 */
6505 int
6506 nfsrv_send(struct nfsrv_sock *slp, mbuf_t nam, mbuf_t top)
6507 {
6508 int error;
6509 socket_t so = slp->ns_so;
6510 struct sockaddr *sendnam;
6511 struct msghdr msg;
6512
6513 bzero(&msg, sizeof(msg));
6514 if (nam && !sock_isconnected(so) && (slp->ns_sotype != SOCK_STREAM)) {
6515 if ((sendnam = mbuf_data(nam))) {
6516 msg.msg_name = (caddr_t)sendnam;
6517 msg.msg_namelen = sendnam->sa_len;
6518 }
6519 }
6520 if (NFS_IS_DBG(NFS_FAC_SRV, 15)) {
6521 nfs_dump_mbuf(__func__, __LINE__, "nfsrv_send\n", top);
6522 }
6523 error = sock_sendmbuf(so, &msg, top, 0, NULL);
6524 if (!error) {
6525 return 0;
6526 }
6527 log(LOG_INFO, "nfsd send error %d\n", error);
6528
6529 if ((error == EWOULDBLOCK) && (slp->ns_sotype == SOCK_STREAM)) {
6530 error = EPIPE; /* zap TCP sockets if they time out on send */
6531 }
6532 /* Handle any recoverable (soft) socket errors here. (???) */
6533 if (error != EINTR && error != ERESTART && error != EIO &&
6534 error != EWOULDBLOCK && error != EPIPE) {
6535 error = 0;
6536 }
6537
6538 return error;
6539 }
6540
6541 /*
6542 * Socket upcall routine for the nfsd sockets.
6543 * The caddr_t arg is a pointer to the "struct nfsrv_sock".
6544 * Essentially do as much as possible non-blocking, else punt and it will
6545 * be called with MBUF_WAITOK from an nfsd.
6546 */
6547 void
6548 nfsrv_rcv(socket_t so, void *arg, int waitflag)
6549 {
6550 struct nfsrv_sock *slp = arg;
6551
6552 if (!nfsd_thread_count || !(slp->ns_flag & SLP_VALID)) {
6553 return;
6554 }
6555
6556 lck_rw_lock_exclusive(&slp->ns_rwlock);
6557 nfsrv_rcv_locked(so, slp, waitflag);
6558 /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */
6559 }
/*
 * Receive data for an nfsd socket; called with ns_rwlock held
 * exclusively.  Stream sockets append raw bytes to ns_raw and parse
 * them into records via nfsrv_getstream(); datagram sockets turn each
 * packet into a record (with a leading SONAME mbuf carrying the
 * sender's address).  When called with MBUF_DONTWAIT, ns_rwlock is
 * dropped before returning and a waiting nfsd is woken if there is
 * work to do.
 */
void
nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag)
{
	mbuf_t m, mp, mhck, m2;
	int ns_flag = 0, error;
	struct msghdr msg;
	size_t bytes_read;

	if ((slp->ns_flag & SLP_VALID) == 0) {
		if (waitflag == MBUF_DONTWAIT) {
			lck_rw_done(&slp->ns_rwlock);
		}
		return;
	}

#ifdef notdef
	/*
	 * Define this to test for nfsds handling this under heavy load.
	 */
	if (waitflag == MBUF_DONTWAIT) {
		ns_flag = SLP_NEEDQ;
		goto dorecs;
	}
#endif
	if (slp->ns_sotype == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an(other) nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (slp->ns_rec) {
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		bytes_read = 1000000000;
		error = sock_receivembuf(so, NULL, &mp, MSG_DONTWAIT, &bytes_read);
		if (error || mp == NULL) {
			if (error == EWOULDBLOCK) {
				ns_flag = (waitflag == MBUF_DONTWAIT) ? SLP_NEEDQ : 0;
			} else {
				ns_flag = SLP_DISCONN;
			}
			goto dorecs;
		}
		/* append the new data to the raw chain and update the byte count */
		m = mp;
		if (slp->ns_rawend) {
			if ((error = mbuf_setnext(slp->ns_rawend, m))) {
				panic("nfsrv_rcv: mbuf_setnext failed %d\n", error);
			}
			slp->ns_cc += bytes_read;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = bytes_read;
		}
		/* advance ns_rawend to the last mbuf of the appended chain */
		while ((m2 = mbuf_next(m))) {
			m = m2;
		}
		slp->ns_rawend = m;

		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag);
		if (error) {
			if (error == EPERM) {
				/* bad record mark: treat as a protocol error, disconnect */
				ns_flag = SLP_DISCONN;
			} else {
				ns_flag = SLP_NEEDQ;
			}
		}
	} else {
		struct sockaddr_storage nam;

		if (slp->ns_reccnt >= nfsrv_sock_max_rec_queue_length) {
			/* already have max # RPC records queued on this socket */
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		bzero(&msg, sizeof(msg));
		msg.msg_name = (caddr_t)&nam;
		msg.msg_namelen = sizeof(nam);

		/* drain all queued datagrams; each one becomes a record */
		do {
			bytes_read = 1000000000;
			error = sock_receivembuf(so, &msg, &mp, MSG_DONTWAIT | MSG_NEEDSA, &bytes_read);
			if (mp) {
				/* prepend a SONAME mbuf holding the sender's address */
				if (msg.msg_name && (mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &mhck) == 0)) {
					mbuf_setlen(mhck, nam.ss_len);
					bcopy(&nam, mbuf_data(mhck), nam.ss_len);
					m = mhck;
					if (mbuf_setnext(m, mp)) {
						/* trouble... just drop it */
						printf("nfsrv_rcv: mbuf_setnext failed\n");
						mbuf_free(mhck);
						m = mp;
					}
				} else {
					m = mp;
				}
				/* append the record to the socket's record list */
				if (slp->ns_recend) {
					mbuf_setnextpkt(slp->ns_recend, m);
				} else {
					slp->ns_rec = m;
					slp->ns_flag |= SLP_DOREC;
				}
				slp->ns_recend = m;
				mbuf_setnextpkt(m, NULL);
				slp->ns_reccnt++;
			}
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (ns_flag) {
		slp->ns_flag |= ns_flag;
	}
	if (waitflag == MBUF_DONTWAIT) {
		int wake = (slp->ns_flag & SLP_WORKTODO);
		lck_rw_done(&slp->ns_rwlock);
		if (wake && nfsd_thread_count) {
			lck_mtx_lock(nfsd_mutex);
			nfsrv_wakenfsd(slp);
			lck_mtx_unlock(nfsd_mutex);
		}
	}
}
6694
6695 /*
6696 * Try and extract an RPC request from the mbuf data list received on a
6697 * stream socket. The "waitflag" argument indicates whether or not it
6698 * can sleep.
6699 */
/*
 * Try and extract RPC records from the raw stream data accumulated on
 * 'slp' (ns_raw / ns_cc).  Each iteration reads a 4-byte RPC record
 * mark, then splits the corresponding fragment off the raw chain and
 * accumulates fragments into ns_frag until the last-fragment bit is
 * seen, at which point the completed record is queued on ns_rec.
 * Returns 0 when out of data, EPERM on a bogus record mark, or
 * EWOULDBLOCK if an mbuf operation could not complete.  The
 * "waitflag" argument indicates whether or not it can sleep.
 */
int
nfsrv_getstream(struct nfsrv_sock *slp, int waitflag)
{
	mbuf_t m;
	char *cp1, *cp2, *mdata;
	int error;
	size_t len, mlen;
	mbuf_t om, m2, recm;
	u_int32_t recmark;

	/* only one thread may be parsing the stream at a time */
	if (slp->ns_flag & SLP_GETSTREAM) {
		panic("nfs getstream");
	}
	slp->ns_flag |= SLP_GETSTREAM;
	for (;;) {
		if (slp->ns_reclen == 0) {
			/* need a new record mark; must have at least 4 bytes buffered */
			if (slp->ns_cc < NFSX_UNSIGNED) {
				slp->ns_flag &= ~SLP_GETSTREAM;
				return 0;
			}
			m = slp->ns_raw;
			mdata = mbuf_data(m);
			mlen = mbuf_len(m);
			if (mlen >= NFSX_UNSIGNED) {
				/* record mark is contained in the first mbuf */
				bcopy(mdata, (caddr_t)&recmark, NFSX_UNSIGNED);
				mdata += NFSX_UNSIGNED;
				mlen -= NFSX_UNSIGNED;
				mbuf_setdata(m, mdata, mlen);
			} else {
				/* record mark straddles mbufs: gather it byte by byte */
				cp1 = (caddr_t)&recmark;
				cp2 = mdata;
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (mlen == 0) {
						m = mbuf_next(m);
						cp2 = mbuf_data(m);
						mlen = mbuf_len(m);
					}
					*cp1++ = *cp2++;
					mlen--;
					mbuf_setdata(m, cp2, mlen);
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			/* high bit of the record mark flags the last fragment */
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000) {
				slp->ns_flag |= SLP_LASTFRAG;
			} else {
				slp->ns_flag &= ~SLP_LASTFRAG;
			}
			if (slp->ns_reclen <= 0 || slp->ns_reclen > NFS_MAXPACKET) {
				/* bogus fragment length: caller will disconnect */
				slp->ns_flag &= ~SLP_GETSTREAM;
				return EPERM;
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0. Linux sometimes
		 * generates 0-length RPCs
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			/* the buffered data is exactly one fragment: take it all */
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = NULL;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			/* more data buffered than the fragment: split the chain */
			len = 0;
			m = slp->ns_raw;
			mlen = mbuf_len(m);
			mdata = mbuf_data(m);
			om = NULL;
			while (len < slp->ns_reclen) {
				if ((len + mlen) > slp->ns_reclen) {
					/* fragment ends mid-mbuf: copy the head, shrink the tail */
					if (mbuf_copym(m, 0, slp->ns_reclen - len, waitflag, &m2)) {
						slp->ns_flag &= ~SLP_GETSTREAM;
						return EWOULDBLOCK;
					}
					if (om) {
						if (mbuf_setnext(om, m2)) {
							/* trouble... just drop it */
							printf("nfsrv_getstream: mbuf_setnext failed\n");
							mbuf_freem(m2);
							slp->ns_flag &= ~SLP_GETSTREAM;
							return EWOULDBLOCK;
						}
						recm = slp->ns_raw;
					} else {
						recm = m2;
					}
					mdata += slp->ns_reclen - len;
					mlen -= slp->ns_reclen - len;
					mbuf_setdata(m, mdata, mlen);
					len = slp->ns_reclen;
				} else if ((len + mlen) == slp->ns_reclen) {
					/* fragment ends exactly at an mbuf boundary */
					om = m;
					len += mlen;
					m = mbuf_next(m);
					recm = slp->ns_raw;
					if (mbuf_setnext(om, NULL)) {
						printf("nfsrv_getstream: mbuf_setnext failed 2\n");
						slp->ns_flag &= ~SLP_GETSTREAM;
						return EWOULDBLOCK;
					}
					mlen = mbuf_len(m);
					mdata = mbuf_data(m);
				} else {
					/* whole mbuf belongs to the fragment: keep walking */
					om = m;
					len += mlen;
					m = mbuf_next(m);
					mlen = mbuf_len(m);
					mdata = mbuf_data(m);
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			/* not enough data buffered yet for this fragment */
			slp->ns_flag &= ~SLP_GETSTREAM;
			return 0;
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		if (slp->ns_frag == NULL) {
			slp->ns_frag = recm;
		} else {
			m = slp->ns_frag;
			while ((m2 = mbuf_next(m))) {
				m = m2;
			}
			if ((error = mbuf_setnext(m, recm))) {
				panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error);
			}
		}
		if (slp->ns_flag & SLP_LASTFRAG) {
			/* record complete: move it to the record queue */
			if (slp->ns_recend) {
				mbuf_setnextpkt(slp->ns_recend, slp->ns_frag);
			} else {
				slp->ns_rec = slp->ns_frag;
				slp->ns_flag |= SLP_DOREC;
			}
			slp->ns_recend = slp->ns_frag;
			slp->ns_frag = NULL;
		}
	}
}
6849
6850 /*
6851 * Parse an RPC header.
6852 */
/*
 * Dequeue the next RPC record from 'slp', allocate a request
 * descriptor for it, and parse the RPC header via nfsrv_getreq().
 * On success the descriptor is returned through '*ndp' and attached
 * to 'nfsd'.  Returns ENOBUFS when there is no record to process.
 */
int
nfsrv_dorec(
	struct nfsrv_sock *slp,
	struct nfsd *nfsd,
	struct nfsrv_descript **ndp)
{
	mbuf_t m;
	mbuf_t nam;
	struct nfsrv_descript *nd;
	int error = 0;

	*ndp = NULL;
	if (!(slp->ns_flag & (SLP_VALID | SLP_DOREC)) || (slp->ns_rec == NULL)) {
		return ENOBUFS;
	}
	nd = zalloc(nfsrv_descript_zone);
	/* dequeue the next record from the socket's record list */
	m = slp->ns_rec;
	slp->ns_rec = mbuf_nextpkt(m);
	if (slp->ns_rec) {
		mbuf_setnextpkt(m, NULL);
	} else {
		/* record list is now empty */
		slp->ns_flag &= ~SLP_DOREC;
		slp->ns_recend = NULL;
	}
	slp->ns_reccnt--;
	/* a leading SONAME mbuf carries the sender's address (see nfsrv_rcv_locked) */
	if (mbuf_type(m) == MBUF_TYPE_SONAME) {
		nam = m;
		m = mbuf_next(m);
		if ((error = mbuf_setnext(nam, NULL))) {
			panic("nfsrv_dorec: mbuf_setnext failed %d\n", error);
		}
	} else {
		nam = NULL;
	}
	nd->nd_nam2 = nam;
	nfsm_chain_dissect_init(error, &nd->nd_nmreq, m);
	if (!error) {
		error = nfsrv_getreq(nd);
	}
	if (error) {
		/* failed to parse the request: release everything we acquired */
		if (nam) {
			mbuf_freem(nam);
		}
		if (nd->nd_gss_context) {
			nfs_gss_svc_ctx_deref(nd->nd_gss_context);
		}
		NFS_ZFREE(nfsrv_descript_zone, nd);
		return error;
	}
	nd->nd_mrep = NULL;
	*ndp = nd;
	nfsd->nfsd_nd = nd;
	return 0;
}
6907
6908 /*
6909 * Parse an RPC request
6910 * - verify it
6911 * - fill in the cred struct.
6912 */
int
nfsrv_getreq(struct nfsrv_descript *nd)
{
	struct nfsm_chain *nmreq;
	int len, i;
	u_int32_t nfsvers, auth_type;
	int error = 0;
	uid_t user_id;
	gid_t group_id;
	short ngroups;
	uint32_t val;

	nd->nd_cr = NULL;
	nd->nd_gss_context = NULL;
	nd->nd_gss_seqnum = 0;
	nd->nd_gss_mb = NULL;

	/* defaults until parsed from the wire credential */
	user_id = group_id = -2;
	val = auth_type = len = 0;

	nmreq = &nd->nd_nmreq;
	nfsm_chain_get_32(error, nmreq, nd->nd_retxid); // XID
	nfsm_chain_get_32(error, nmreq, val); // RPC Call
	if (!error && (val != RPC_CALL)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nd->nd_repstat = 0;
	nfsm_chain_get_32(error, nmreq, val); // RPC Version
	nfsmout_if(error);
	if (val != RPC_VER2) {
		/* let the caller generate an RPC-mismatch reply */
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	nfsm_chain_get_32(error, nmreq, val); // RPC Program Number
	nfsmout_if(error);
	if (val != NFS_PROG) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	nfsm_chain_get_32(error, nmreq, nfsvers);// NFS Version Number
	nfsmout_if(error);
	if ((nfsvers < NFS_VER2) || (nfsvers > NFS_VER3)) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	nd->nd_vers = nfsvers;
	nfsm_chain_get_32(error, nmreq, nd->nd_procnum);// NFS Procedure Number
	nfsmout_if(error);
	if ((nd->nd_procnum >= NFS_NPROCS) ||
	    ((nd->nd_vers == NFS_VER2) && (nd->nd_procnum > NFSV2PROC_STATFS))) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	if (nfsvers != NFS_VER3) {
		/* map v2 procedure numbers onto the v3 numbering */
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	}
	nfsm_chain_get_32(error, nmreq, auth_type); // Auth Flavor
	nfsm_chain_get_32(error, nmreq, len); // Auth Length
	if (!error && (len < 0 || len > RPCAUTH_MAXSIZ)) {
		error = EBADRPC;
	}
	nfsmout_if(error);

	/* Handle authentication */
	if (auth_type == RPCAUTH_SYS) {
		struct posix_cred temp_pcred;
		if (nd->nd_procnum == NFSPROC_NULL) {
			/* NULL proc needs no credential */
			return 0;
		}
		nd->nd_sec = RPCAUTH_SYS;
		nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // skip stamp
		nfsm_chain_get_32(error, nmreq, len); // hostname length
		if (len < 0 || len > NFS_MAXNAMLEN) {
			error = EBADRPC;
		}
		nfsm_chain_adv(error, nmreq, nfsm_rndup(len)); // skip hostname
		nfsmout_if(error);

		/* create a temporary credential using the bits from the wire */
		bzero(&temp_pcred, sizeof(temp_pcred));
		nfsm_chain_get_32(error, nmreq, user_id);
		nfsm_chain_get_32(error, nmreq, group_id);
		temp_pcred.cr_groups[0] = group_id;
		nfsm_chain_get_32(error, nmreq, len); // extra GID count
		if ((len < 0) || (len > RPCAUTH_UNIXGIDS)) {
			error = EBADRPC;
		}
		nfsmout_if(error);
		/* read up to NGROUPS-1 extra GIDs; skip any beyond that */
		for (i = 1; i <= len; i++) {
			if (i < NGROUPS) {
				nfsm_chain_get_32(error, nmreq, temp_pcred.cr_groups[i]);
			} else {
				nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED);
			}
		}
		nfsmout_if(error);
		ngroups = (len >= NGROUPS) ? NGROUPS : (short)(len + 1);
		if (ngroups > 1) {
			nfsrv_group_sort(&temp_pcred.cr_groups[0], ngroups);
		}
		nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
		nfsm_chain_get_32(error, nmreq, len); // verifier length
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			error = EBADRPC;
		}
		if (len > 0) {
			nfsm_chain_adv(error, nmreq, nfsm_rndup(len));
		}

		/* request creation of a real credential */
		temp_pcred.cr_uid = user_id;
		temp_pcred.cr_ngroups = ngroups;
		nd->nd_cr = posix_cred_create(&temp_pcred);
		if (nd->nd_cr == NULL) {
			nd->nd_repstat = ENOMEM;
			nd->nd_procnum = NFSPROC_NOOP;
			return 0;
		}
	} else if (auth_type == RPCSEC_GSS) {
		error = nfs_gss_svc_cred_get(nd, nmreq);
		if (error) {
			if (error == EINVAL) {
				goto nfsmout; // drop the request
			}
			nd->nd_repstat = error;
			nd->nd_procnum = NFSPROC_NOOP;
			return 0;
		}
	} else {
		if (nd->nd_procnum == NFSPROC_NULL) { // assume it's AUTH_NONE
			return 0;
		}
		/* unsupported auth flavor: reject the credential */
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	return 0;
nfsmout:
	/* parse failure: release any credential and the request chain */
	if (IS_VALID_CRED(nd->nd_cr)) {
		kauth_cred_unref(&nd->nd_cr);
	}
	nfsm_chain_cleanup(nmreq);
	return error;
}
7062
7063 /*
7064 * Search for a sleeping nfsd and wake it up.
7065 * SIDE EFFECT: If none found, make sure the socket is queued up so that one
7066 * of the running nfsds will go look for the work in the nfsrv_sockwait list.
7067 * Note: Must be called with nfsd_mutex held.
7068 */
7069 void
7070 nfsrv_wakenfsd(struct nfsrv_sock *slp)
7071 {
7072 struct nfsd *nd;
7073
7074 if ((slp->ns_flag & SLP_VALID) == 0) {
7075 return;
7076 }
7077
7078 lck_rw_lock_exclusive(&slp->ns_rwlock);
7079 /* if there's work to do on this socket, make sure it's queued up */
7080 if ((slp->ns_flag & SLP_WORKTODO) && !(slp->ns_flag & SLP_QUEUED)) {
7081 TAILQ_INSERT_TAIL(&nfsrv_sockwait, slp, ns_svcq);
7082 slp->ns_flag |= SLP_WAITQ;
7083 }
7084 lck_rw_done(&slp->ns_rwlock);
7085
7086 /* wake up a waiting nfsd, if possible */
7087 nd = TAILQ_FIRST(&nfsd_queue);
7088 if (!nd) {
7089 return;
7090 }
7091
7092 TAILQ_REMOVE(&nfsd_queue, nd, nfsd_queue);
7093 nd->nfsd_flag &= ~NFSD_WAITING;
7094 wakeup(nd);
7095 }
7096
7097 #endif /* CONFIG_NFS_SERVER */
7098
7099 #endif /* CONFIG_NFS */