]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (c) 2000-2019 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ | |
29 | /* | |
30 | * Copyright (c) 1989, 1991, 1993, 1995 | |
31 | * The Regents of the University of California. All rights reserved. | |
32 | * | |
33 | * This code is derived from software contributed to Berkeley by | |
34 | * Rick Macklem at The University of Guelph. | |
35 | * | |
36 | * Redistribution and use in source and binary forms, with or without | |
37 | * modification, are permitted provided that the following conditions | |
38 | * are met: | |
39 | * 1. Redistributions of source code must retain the above copyright | |
40 | * notice, this list of conditions and the following disclaimer. | |
41 | * 2. Redistributions in binary form must reproduce the above copyright | |
42 | * notice, this list of conditions and the following disclaimer in the | |
43 | * documentation and/or other materials provided with the distribution. | |
44 | * 3. All advertising materials mentioning features or use of this software | |
45 | * must display the following acknowledgement: | |
46 | * This product includes software developed by the University of | |
47 | * California, Berkeley and its contributors. | |
48 | * 4. Neither the name of the University nor the names of its contributors | |
49 | * may be used to endorse or promote products derived from this software | |
50 | * without specific prior written permission. | |
51 | * | |
52 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
53 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
54 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
55 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
56 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
57 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
58 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
59 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
60 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
61 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
62 | * SUCH DAMAGE. | |
63 | * | |
64 | * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95 | |
65 | * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $ | |
66 | */ | |
67 | ||
68 | /* | |
69 | * Socket operations for use by nfs | |
70 | */ | |
71 | ||
72 | #include <sys/param.h> | |
73 | #include <sys/systm.h> | |
74 | #include <sys/proc.h> | |
75 | #include <sys/signalvar.h> | |
76 | #include <sys/kauth.h> | |
77 | #include <sys/mount_internal.h> | |
78 | #include <sys/kernel.h> | |
79 | #include <sys/kpi_mbuf.h> | |
80 | #include <sys/malloc.h> | |
81 | #include <sys/vnode.h> | |
82 | #include <sys/domain.h> | |
83 | #include <sys/protosw.h> | |
84 | #include <sys/socket.h> | |
85 | #include <sys/un.h> | |
86 | #include <sys/syslog.h> | |
87 | #include <sys/tprintf.h> | |
88 | #include <libkern/OSAtomic.h> | |
89 | ||
90 | #include <sys/time.h> | |
91 | #include <kern/clock.h> | |
92 | #include <kern/task.h> | |
93 | #include <kern/thread.h> | |
94 | #include <kern/thread_call.h> | |
95 | #include <sys/user.h> | |
96 | #include <sys/acct.h> | |
97 | ||
98 | #include <netinet/in.h> | |
99 | #include <netinet/tcp.h> | |
100 | ||
101 | #include <nfs/rpcv2.h> | |
102 | #include <nfs/krpc.h> | |
103 | #include <nfs/nfsproto.h> | |
104 | #include <nfs/nfs.h> | |
105 | #include <nfs/xdr_subs.h> | |
106 | #include <nfs/nfsm_subs.h> | |
107 | #include <nfs/nfs_gss.h> | |
108 | #include <nfs/nfsmount.h> | |
109 | #include <nfs/nfsnode.h> | |
110 | ||
111 | #define NFS_SOCK_DBG(...) NFS_DBG(NFS_FAC_SOCK, 7, ## __VA_ARGS__) | |
112 | #define NFS_SOCK_DUMP_MBUF(msg, mb) if (NFS_IS_DBG(NFS_FAC_SOCK, 15)) nfs_dump_mbuf(__func__, __LINE__, (msg), (mb)) | |
113 | ||
114 | /* XXX */ | |
115 | boolean_t current_thread_aborted(void); | |
116 | kern_return_t thread_terminate(thread_t); | |
117 | ||
118 | ||
119 | #if NFSSERVER | |
120 | int nfsrv_sock_max_rec_queue_length = 128; /* max # RPC records queued on (UDP) socket */ | |
121 | ||
122 | int nfsrv_getstream(struct nfsrv_sock *, int); | |
123 | int nfsrv_getreq(struct nfsrv_descript *); | |
124 | extern int nfsv3_procid[NFS_NPROCS]; | |
125 | #endif /* NFSSERVER */ | |
126 | ||
127 | /* | |
128 | * compare two sockaddr structures | |
129 | */ | |
130 | int | |
131 | nfs_sockaddr_cmp(struct sockaddr *sa1, struct sockaddr *sa2) | |
132 | { | |
133 | if (!sa1) { | |
134 | return -1; | |
135 | } | |
136 | if (!sa2) { | |
137 | return 1; | |
138 | } | |
139 | if (sa1->sa_family != sa2->sa_family) { | |
140 | return (sa1->sa_family < sa2->sa_family) ? -1 : 1; | |
141 | } | |
142 | if (sa1->sa_len != sa2->sa_len) { | |
143 | return (sa1->sa_len < sa2->sa_len) ? -1 : 1; | |
144 | } | |
145 | if (sa1->sa_family == AF_INET) { | |
146 | return bcmp(&((struct sockaddr_in*)sa1)->sin_addr, | |
147 | &((struct sockaddr_in*)sa2)->sin_addr, sizeof(((struct sockaddr_in*)sa1)->sin_addr)); | |
148 | } | |
149 | if (sa1->sa_family == AF_INET6) { | |
150 | return bcmp(&((struct sockaddr_in6*)sa1)->sin6_addr, | |
151 | &((struct sockaddr_in6*)sa2)->sin6_addr, sizeof(((struct sockaddr_in6*)sa1)->sin6_addr)); | |
152 | } | |
153 | return -1; | |
154 | } | |
155 | ||
156 | #if NFSCLIENT | |
157 | ||
158 | int nfs_connect_search_new_socket(struct nfsmount *, struct nfs_socket_search *, struct timeval *); | |
159 | int nfs_connect_search_socket_connect(struct nfsmount *, struct nfs_socket *, int); | |
160 | int nfs_connect_search_ping(struct nfsmount *, struct nfs_socket *, struct timeval *); | |
161 | void nfs_connect_search_socket_found(struct nfsmount *, struct nfs_socket_search *, struct nfs_socket *); | |
162 | void nfs_connect_search_socket_reap(struct nfsmount *, struct nfs_socket_search *, struct timeval *); | |
163 | int nfs_connect_search_check(struct nfsmount *, struct nfs_socket_search *, struct timeval *); | |
164 | int nfs_reconnect(struct nfsmount *); | |
165 | int nfs_connect_setup(struct nfsmount *); | |
166 | void nfs_mount_sock_thread(void *, wait_result_t); | |
167 | void nfs_udp_rcv(socket_t, void*, int); | |
168 | void nfs_tcp_rcv(socket_t, void*, int); | |
169 | void nfs_sock_poke(struct nfsmount *); | |
170 | void nfs_request_match_reply(struct nfsmount *, mbuf_t); | |
171 | void nfs_reqdequeue(struct nfsreq *); | |
172 | void nfs_reqbusy(struct nfsreq *); | |
173 | struct nfsreq *nfs_reqnext(struct nfsreq *); | |
174 | int nfs_wait_reply(struct nfsreq *); | |
175 | void nfs_softterm(struct nfsreq *); | |
176 | int nfs_can_squish(struct nfsmount *); | |
177 | int nfs_is_squishy(struct nfsmount *); | |
178 | int nfs_is_dead(int, struct nfsmount *); | |
179 | ||
180 | /* | |
181 | * Estimate rto for an nfs rpc sent via. an unreliable datagram. | |
182 | * Use the mean and mean deviation of rtt for the appropriate type of rpc | |
183 | * for the frequent rpcs and a default for the others. | |
184 | * The justification for doing "other" this way is that these rpcs | |
185 | * happen so infrequently that timer est. would probably be stale. | |
186 | * Also, since many of these rpcs are | |
187 | * non-idempotent, a conservative timeout is desired. | |
188 | * getattr, lookup - A+2D | |
189 | * read, write - A+4D | |
190 | * other - nm_timeo | |
191 | */ | |
192 | #define NFS_RTO(n, t) \ | |
193 | ((t) == 0 ? (n)->nm_timeo : \ | |
194 | ((t) < 3 ? \ | |
195 | (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \ | |
196 | ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1))) | |
197 | #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1] | |
198 | #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1] | |
199 | ||
200 | /* | |
201 | * Defines which timer to use for the procnum. | |
202 | * 0 - default | |
203 | * 1 - getattr | |
204 | * 2 - lookup | |
205 | * 3 - read | |
206 | * 4 - write | |
207 | */ | |
208 | static const int proct[] = { | |
209 | [NFSPROC_NULL] = 0, | |
210 | [NFSPROC_GETATTR] = 1, | |
211 | [NFSPROC_SETATTR] = 0, | |
212 | [NFSPROC_LOOKUP] = 2, | |
213 | [NFSPROC_ACCESS] = 1, | |
214 | [NFSPROC_READLINK] = 3, | |
215 | [NFSPROC_READ] = 3, | |
216 | [NFSPROC_WRITE] = 4, | |
217 | [NFSPROC_CREATE] = 0, | |
218 | [NFSPROC_MKDIR] = 0, | |
219 | [NFSPROC_SYMLINK] = 0, | |
220 | [NFSPROC_MKNOD] = 0, | |
221 | [NFSPROC_REMOVE] = 0, | |
222 | [NFSPROC_RMDIR] = 0, | |
223 | [NFSPROC_RENAME] = 0, | |
224 | [NFSPROC_LINK] = 0, | |
225 | [NFSPROC_READDIR] = 3, | |
226 | [NFSPROC_READDIRPLUS] = 3, | |
227 | [NFSPROC_FSSTAT] = 0, | |
228 | [NFSPROC_FSINFO] = 0, | |
229 | [NFSPROC_PATHCONF] = 0, | |
230 | [NFSPROC_COMMIT] = 0, | |
231 | [NFSPROC_NOOP] = 0, | |
232 | }; | |
233 | ||
234 | /* | |
235 | * There is a congestion window for outstanding rpcs maintained per mount | |
236 | * point. The cwnd size is adjusted in roughly the way that: | |
237 | * Van Jacobson, Congestion avoidance and Control, In "Proceedings of | |
238 | * SIGCOMM '88". ACM, August 1988. | |
239 | * describes for TCP. The cwnd size is chopped in half on a retransmit timeout | |
240 | * and incremented by 1/cwnd when each rpc reply is received and a full cwnd | |
241 | * of rpcs is in progress. | |
242 | * (The sent count and cwnd are scaled for integer arith.) | |
243 | * Variants of "slow start" were tried and were found to be too much of a | |
244 | * performance hit (ave. rtt 3 times larger), | |
245 | * I suspect due to the large rtt that nfs rpcs have. | |
246 | */ | |
247 | #define NFS_CWNDSCALE 256 | |
248 | #define NFS_MAXCWND (NFS_CWNDSCALE * 32) | |
249 | static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, }; | |
250 | ||
/*
 * Increment location index to next address/server/location.
 *
 * Advances *nlip in address-major order within nlp: the next address on
 * the current server, else the first address of the next server, else the
 * first server of the next location, wrapping from the last location back
 * to the first.  If no server anywhere has any addresses, the index is
 * left unchanged rather than looping forever.
 */
void
nfs_location_next(struct nfs_fs_locations *nlp, struct nfs_location_index *nlip)
{
	uint8_t loc = nlip->nli_loc;
	uint8_t serv = nlip->nli_serv;
	uint8_t addr = nlip->nli_addr;

	/* move to next address */
	addr++;
	if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
		/* no more addresses on current server, go to first address of next server */
next_server:
		addr = 0;
		serv++;
		if (serv >= nlp->nl_locations[loc]->nl_servcount) {
			/* no more servers on current location, go to first server of next location */
			serv = 0;
			loc++;
			if (loc >= nlp->nl_numlocs) {
				loc = 0; /* after last location, wrap back around to first location */
			}
		}
	}
	/*
	 * It's possible for this next server to not have any addresses.
	 * Check for that here and go to the next server.
	 * But bail out if we've managed to come back around to the original
	 * location that was passed in. (That would mean no servers had any
	 * addresses. And we don't want to spin here forever.)
	 */
	if ((loc == nlip->nli_loc) && (serv == nlip->nli_serv) && (addr == nlip->nli_addr)) {
		return;
	}
	if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
		goto next_server;
	}

	/* commit the new (valid) index back to the caller */
	nlip->nli_loc = loc;
	nlip->nli_serv = serv;
	nlip->nli_addr = addr;
}
295 | ||
296 | /* | |
297 | * Compare two location indices. | |
298 | */ | |
299 | int | |
300 | nfs_location_index_cmp(struct nfs_location_index *nlip1, struct nfs_location_index *nlip2) | |
301 | { | |
302 | if (nlip1->nli_loc != nlip2->nli_loc) { | |
303 | return nlip1->nli_loc - nlip2->nli_loc; | |
304 | } | |
305 | if (nlip1->nli_serv != nlip2->nli_serv) { | |
306 | return nlip1->nli_serv - nlip2->nli_serv; | |
307 | } | |
308 | return nlip1->nli_addr - nlip2->nli_addr; | |
309 | } | |
310 | ||
311 | /* | |
312 | * Get the mntfromname (or path portion only) for a given location. | |
313 | */ | |
314 | void | |
315 | nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, int size, int pathonly) | |
316 | { | |
317 | struct nfs_fs_location *fsl = locs->nl_locations[idx.nli_loc]; | |
318 | char *p; | |
319 | int cnt, i; | |
320 | ||
321 | p = s; | |
322 | if (!pathonly) { | |
323 | char *name = fsl->nl_servers[idx.nli_serv]->ns_name; | |
324 | if (name == NULL) { | |
325 | name = ""; | |
326 | } | |
327 | if (*name == '\0') { | |
328 | if (*fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr]) { | |
329 | name = fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr]; | |
330 | } | |
331 | cnt = snprintf(p, size, "<%s>:", name); | |
332 | } else { | |
333 | cnt = snprintf(p, size, "%s:", name); | |
334 | } | |
335 | p += cnt; | |
336 | size -= cnt; | |
337 | } | |
338 | if (fsl->nl_path.np_compcount == 0) { | |
339 | /* mounting root export on server */ | |
340 | if (size > 0) { | |
341 | *p++ = '/'; | |
342 | *p++ = '\0'; | |
343 | } | |
344 | return; | |
345 | } | |
346 | /* append each server path component */ | |
347 | for (i = 0; (size > 0) && (i < (int)fsl->nl_path.np_compcount); i++) { | |
348 | cnt = snprintf(p, size, "/%s", fsl->nl_path.np_components[i]); | |
349 | p += cnt; | |
350 | size -= cnt; | |
351 | } | |
352 | } | |
353 | ||
/*
 * NFS client connect socket upcall.
 * (Used only during socket connect/search.)
 *
 * Receives data from a candidate socket while it is being probed and
 * parses the RPC ping reply to decide whether the socket is usable
 * (NSO_VERIFIED) or should be abandoned (NSO_DEAD).  Runs in socket
 * upcall context; nso state is protected by nso_lock, which is dropped
 * around the blocking receive calls.
 */
void
nfs_connect_upcall(socket_t so, void *arg, __unused int waitflag)
{
	struct nfs_socket *nso = arg;
	size_t rcvlen;
	mbuf_t m;
	int error = 0, recv = 1;

	if (nso->nso_flags & NSO_CONNECTING) {
		/* still connecting: just poke the connect/search thread */
		NFS_SOCK_DBG("nfs connect - socket %p upcall - connecting flags = %8.8x\n", nso, nso->nso_flags);
		wakeup(nso->nso_wake);
		return;
	}

	lck_mtx_lock(&nso->nso_lock);
	if ((nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) || !(nso->nso_flags & NSO_PINGING)) {
		/* upcall already running, socket going away, or no ping reply expected */
		NFS_SOCK_DBG("nfs connect - socket %p upcall - nevermind\n", nso);
		lck_mtx_unlock(&nso->nso_lock);
		return;
	}
	NFS_SOCK_DBG("nfs connect - socket %p upcall %8.8x\n", nso, nso->nso_flags);
	nso->nso_flags |= NSO_UPCALL;

	/* loop while we make error-free progress */
	while (!error && recv) {
		/* make sure we're still interested in this socket */
		if (nso->nso_flags & (NSO_DISCONNECTING | NSO_DEAD)) {
			break;
		}
		/* drop the lock while calling into the socket layer */
		lck_mtx_unlock(&nso->nso_lock);
		m = NULL;
		if (nso->nso_sotype == SOCK_STREAM) {
			/* TCP: reassemble a complete RPC record */
			error = nfs_rpc_record_read(so, &nso->nso_rrs, MSG_DONTWAIT, &recv, &m);
			NFS_SOCK_DBG("nfs_rpc_record_read returned %d recv = %d\n", error, recv);
		} else {
			/* UDP: each datagram is a complete message */
			rcvlen = 1000000;
			error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
			recv = m ? 1 : 0;
		}
		lck_mtx_lock(&nso->nso_lock);
		if (m) {
			/* match response with request */
			struct nfsm_chain nmrep;
			uint32_t reply = 0, rxid = 0, verf_type, verf_len;
			uint32_t reply_status, rejected_status, accepted_status;

			NFS_SOCK_DUMP_MBUF("Got mbuf from ping", m);
			/* dissect the RPC reply header: xid, direction, status */
			nfsm_chain_dissect_init(error, &nmrep, m);
			nfsm_chain_get_32(error, &nmrep, rxid);
			nfsm_chain_get_32(error, &nmrep, reply);
			if (!error && ((reply != RPC_REPLY) || (rxid != nso->nso_pingxid))) {
				/* not a reply, or not to OUR ping */
				error = EBADRPC;
			}
			nfsm_chain_get_32(error, &nmrep, reply_status);
			if (!error && (reply_status == RPC_MSGDENIED)) {
				nfsm_chain_get_32(error, &nmrep, rejected_status);
				if (!error) {
					error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
				}
			}
			nfsm_chain_get_32(error, &nmrep, verf_type); /* verifier flavor */
			nfsm_chain_get_32(error, &nmrep, verf_len); /* verifier length */
			nfsmout_if(error);
			if (verf_len) {
				/* skip over the verifier body (XDR-rounded) */
				nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
			}
			nfsm_chain_get_32(error, &nmrep, accepted_status);
			nfsmout_if(error);
			NFS_SOCK_DBG("Recevied accepted_status of %d nso_version = %d\n", accepted_status, nso->nso_version);
			if ((accepted_status == RPC_PROGMISMATCH) && !nso->nso_version) {
				/*
				 * No version locked in yet: negotiate one from the
				 * [minvers, maxvers] range the server reported.
				 */
				uint32_t minvers, maxvers;
				nfsm_chain_get_32(error, &nmrep, minvers);
				nfsm_chain_get_32(error, &nmrep, maxvers);
				nfsmout_if(error);
				if (nso->nso_protocol == PMAPPROG) {
					if ((minvers > RPCBVERS4) || (maxvers < PMAPVERS)) {
						error = EPROGMISMATCH;
					} else if ((nso->nso_saddr->sa_family == AF_INET) &&
					    (PMAPVERS >= minvers) && (PMAPVERS <= maxvers)) {
						nso->nso_version = PMAPVERS;
					} else if (nso->nso_saddr->sa_family == AF_INET6) {
						if ((RPCBVERS4 >= minvers) && (RPCBVERS4 <= maxvers)) {
							nso->nso_version = RPCBVERS4;
						} else if ((RPCBVERS3 >= minvers) && (RPCBVERS3 <= maxvers)) {
							nso->nso_version = RPCBVERS3;
						}
					}
				} else if (nso->nso_protocol == NFS_PROG) {
					int vers;

					/*
					 * N.B. Both portmapper and rpcbind V3 are happy to return
					 * addresses for other versions than the one you ask (getport or
					 * getaddr) and thus we may have fallen to this code path. So if
					 * we get a version that we support, use highest supported
					 * version. This assumes that the server supports all versions
					 * between minvers and maxvers. Note for IPv6 we will try and
					 * use rpcbind V4 which has getversaddr and we should not get
					 * here if that was successful.
					 */
					for (vers = nso->nso_nfs_max_vers; vers >= (int)nso->nso_nfs_min_vers; vers--) {
						if (vers >= (int)minvers && vers <= (int)maxvers) {
							break;
						}
					}
					nso->nso_version = (vers < (int)nso->nso_nfs_min_vers) ? 0 : vers;
				}
				if (!error && nso->nso_version) {
					/* a usable version was negotiated; treat the ping as successful */
					accepted_status = RPC_SUCCESS;
				}
			}
			if (!error) {
				/* map the RPC accept status to an errno */
				switch (accepted_status) {
				case RPC_SUCCESS:
					error = 0;
					break;
				case RPC_PROGUNAVAIL:
					error = EPROGUNAVAIL;
					break;
				case RPC_PROGMISMATCH:
					error = EPROGMISMATCH;
					break;
				case RPC_PROCUNAVAIL:
					error = EPROCUNAVAIL;
					break;
				case RPC_GARBAGE:
					error = EBADRPC;
					break;
				case RPC_SYSTEM_ERR:
				default:
					error = EIO;
					break;
				}
			}
nfsmout:
			/* ping answered (or failed); record the verdict on the socket */
			nso->nso_flags &= ~NSO_PINGING;
			if (error) {
				NFS_SOCK_DBG("nfs upcalled failed for %d program %d vers error = %d\n",
				    nso->nso_protocol, nso->nso_version, error);
				nso->nso_error = error;
				nso->nso_flags |= NSO_DEAD;
			} else {
				nso->nso_flags |= NSO_VERIFIED;
			}
			mbuf_freem(m);
			/* wake up search thread */
			wakeup(nso->nso_wake);
			break;
		}
	}

	nso->nso_flags &= ~NSO_UPCALL;
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/* problems with the socket... */
		NFS_SOCK_DBG("connect upcall failed %d\n", error);
		nso->nso_error = error ? error : EPIPE;
		nso->nso_flags |= NSO_DEAD;
		wakeup(nso->nso_wake);
	}
	if (nso->nso_flags & NSO_DISCONNECTING) {
		/* nfs_socket_destroy() is waiting for this upcall to finish */
		wakeup(&nso->nso_flags);
	}
	lck_mtx_unlock(&nso->nso_lock);
}
522 | ||
/*
 * Create/initialize an nfs_socket structure.
 *
 * Allocates an nfs_socket, copies in the server address sa (forcing the
 * requested port for AF_INET/AF_INET6), records protocol/version info,
 * creates the kernel socket, and — when resvport is set for an IP
 * family — binds it to a reserved (privileged) port.  On success the
 * new socket is returned through *nsop; on any failure it is destroyed
 * and an errno is returned.
 */
int
nfs_socket_create(
	struct nfsmount *nmp,
	struct sockaddr *sa,
	int sotype,
	in_port_t port,
	uint32_t protocol,
	uint32_t vers,
	int resvport,
	struct nfs_socket **nsop)
{
	struct nfs_socket *nso;
	struct timeval now;
	int error;
#define NFS_SOCKET_DEBUGGING
#ifdef NFS_SOCKET_DEBUGGING
	/* naddr: printable form of sa, used only in the debug log lines below */
	char naddr[sizeof((struct sockaddr_un *)0)->sun_path];
	void *sinaddr;

	switch (sa->sa_family) {
	case AF_INET:
	case AF_INET6:
		if (sa->sa_family == AF_INET) {
			sinaddr = &((struct sockaddr_in*)sa)->sin_addr;
		} else {
			sinaddr = &((struct sockaddr_in6*)sa)->sin6_addr;
		}
		if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
			strlcpy(naddr, "<unknown>", sizeof(naddr));
		}
		break;
	case AF_LOCAL:
		strlcpy(naddr, ((struct sockaddr_un *)sa)->sun_path, sizeof(naddr));
		break;
	default:
		strlcpy(naddr, "<unsupported address family>", sizeof(naddr));
		break;
	}
#else
	char naddr[1] = { 0 };
#endif

	*nsop = NULL;

	/* Create the socket. */
	MALLOC(nso, struct nfs_socket *, sizeof(struct nfs_socket), M_TEMP, M_WAITOK | M_ZERO);
	if (nso) {
		MALLOC(nso->nso_saddr, struct sockaddr *, sa->sa_len, M_SONAME, M_WAITOK | M_ZERO);
	}
	if (!nso || !nso->nso_saddr) {
		/* either allocation failed; free whatever succeeded */
		if (nso) {
			FREE(nso, M_TEMP);
		}
		return ENOMEM;
	}
	lck_mtx_init(&nso->nso_lock, nfs_request_grp, LCK_ATTR_NULL);
	nso->nso_sotype = sotype;
	if (nso->nso_sotype == SOCK_STREAM) {
		/* TCP needs RPC record-marking state */
		nfs_rpc_record_state_init(&nso->nso_rrs);
	}
	microuptime(&now);
	nso->nso_timestamp = now.tv_sec;
	bcopy(sa, nso->nso_saddr, sa->sa_len);
	/* force the caller's port into the copied address (IP families only) */
	switch (sa->sa_family) {
	case AF_INET:
	case AF_INET6:
		if (sa->sa_family == AF_INET) {
			((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
		} else if (sa->sa_family == AF_INET6) {
			((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
		}
		break;
	case AF_LOCAL:
		break;
	}
	nso->nso_protocol = protocol;
	nso->nso_version = vers;
	nso->nso_nfs_min_vers = PVER2MAJOR(nmp->nm_min_vers);
	nso->nso_nfs_max_vers = PVER2MAJOR(nmp->nm_max_vers);

	error = sock_socket(sa->sa_family, nso->nso_sotype, 0, NULL, NULL, &nso->nso_so);

	/* Some servers require that the client port be a reserved port number. */
	if (!error && resvport && ((sa->sa_family == AF_INET) || (sa->sa_family == AF_INET6))) {
		struct sockaddr_storage ss;
		int level = (sa->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
		int optname = (sa->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
		int portrange = IP_PORTRANGE_LOW;

		/* request a low (reserved) local port from the stack */
		error = sock_setsockopt(nso->nso_so, level, optname, &portrange, sizeof(portrange));
		if (!error) { /* bind now to check for failure */
			ss.ss_len = sa->sa_len;
			ss.ss_family = sa->sa_family;
			if (ss.ss_family == AF_INET) {
				((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
				((struct sockaddr_in*)&ss)->sin_port = htons(0);
			} else if (ss.ss_family == AF_INET6) {
				((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
				((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
			} else {
				error = EINVAL;
			}
			if (!error) {
				error = sock_bind(nso->nso_so, (struct sockaddr*)&ss);
			}
		}
	}

	if (error) {
		NFS_SOCK_DBG("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nso, naddr, sotype,
		    resvport ? "r" : "", port, protocol, vers);
		nfs_socket_destroy(nso);
	} else {
		NFS_SOCK_DBG("nfs connect %s created socket %p <%s> type %d%s port %d prot %d %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, naddr,
		    sotype, resvport ? "r" : "", port, protocol, vers);
		*nsop = nso;
	}
	return error;
}
647 | ||
648 | /* | |
649 | * Destroy an nfs_socket structure. | |
650 | */ | |
651 | void | |
652 | nfs_socket_destroy(struct nfs_socket *nso) | |
653 | { | |
654 | struct timespec ts = { .tv_sec = 4, .tv_nsec = 0 }; | |
655 | ||
656 | NFS_SOCK_DBG("Destoring socket %p flags = %8.8x error = %d\n", nso, nso->nso_flags, nso->nso_error); | |
657 | lck_mtx_lock(&nso->nso_lock); | |
658 | nso->nso_flags |= NSO_DISCONNECTING; | |
659 | if (nso->nso_flags & NSO_UPCALL) { /* give upcall a chance to complete */ | |
660 | msleep(&nso->nso_flags, &nso->nso_lock, PZERO - 1, "nfswaitupcall", &ts); | |
661 | } | |
662 | lck_mtx_unlock(&nso->nso_lock); | |
663 | sock_shutdown(nso->nso_so, SHUT_RDWR); | |
664 | sock_close(nso->nso_so); | |
665 | if (nso->nso_sotype == SOCK_STREAM) { | |
666 | nfs_rpc_record_state_cleanup(&nso->nso_rrs); | |
667 | } | |
668 | lck_mtx_destroy(&nso->nso_lock, nfs_request_grp); | |
669 | if (nso->nso_saddr) { | |
670 | FREE(nso->nso_saddr, M_SONAME); | |
671 | } | |
672 | if (nso->nso_saddr2) { | |
673 | FREE(nso->nso_saddr2, M_SONAME); | |
674 | } | |
675 | NFS_SOCK_DBG("nfs connect - socket %p destroyed\n", nso); | |
676 | FREE(nso, M_TEMP); | |
677 | } | |
678 | ||
/*
 * Set common socket options on an nfs_socket.
 *
 * Applies the standard option set used by all NFS client sockets for
 * the given mount: send/receive timeouts, TCP keepalive/nodelay,
 * buffer sizing for datagram and local sockets, and upcall/interrupt
 * behavior.  All sock_setsockopt() return values are intentionally
 * ignored — these are best-effort tuning knobs.
 */
void
nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso)
{
	/*
	 * Set socket send/receive timeouts
	 * - Receive timeout shouldn't matter because most receives are performed
	 * in the socket upcall non-blocking.
	 * - Send timeout should allow us to react to a blocked socket.
	 * Soft mounts will want to abort sooner.
	 */
	struct timeval timeo;
	int on = 1, proto;

	timeo.tv_usec = 0;
	/* soft/squishy mounts time out in 5s, hard mounts in 60s */
	timeo.tv_sec = (NMFLAG(nmp, SOFT) || nfs_can_squish(nmp)) ? 5 : 60;
	sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (nso->nso_sotype == SOCK_STREAM) {
		/* Assume that SOCK_STREAM always requires a connection */
		sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
		/* set nodelay for TCP */
		sock_gettype(nso->nso_so, NULL, NULL, &proto);
		if (proto == IPPROTO_TCP) {
			sock_setsockopt(nso->nso_so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
		}
	}
	if (nso->nso_sotype == SOCK_DGRAM || nso->nso_saddr->sa_family == AF_LOCAL) { /* set socket buffer sizes for UDP */
		int reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : (2 * 1024 * 1024);
		sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve));
		sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve));
	}
	/* set SO_NOADDRERR to detect network changes ASAP */
	sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	/* just playin' it safe with upcalls */
	sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
	/* socket should be interruptible if the mount is */
	if (!NMFLAG(nmp, INTR)) {
		sock_nointerrupt(nso->nso_so, 1);
	}
}
722 | ||
723 | /* | |
724 | * Release resources held in an nfs_socket_search. | |
725 | */ | |
726 | void | |
727 | nfs_socket_search_cleanup(struct nfs_socket_search *nss) | |
728 | { | |
729 | struct nfs_socket *nso, *nsonext; | |
730 | ||
731 | TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) { | |
732 | TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link); | |
733 | nss->nss_sockcnt--; | |
734 | nfs_socket_destroy(nso); | |
735 | } | |
736 | if (nss->nss_sock) { | |
737 | nfs_socket_destroy(nss->nss_sock); | |
738 | nss->nss_sock = NULL; | |
739 | } | |
740 | } | |
741 | ||
742 | /* | |
743 | * Prefer returning certain errors over others. | |
744 | * This function returns a ranking of the given error. | |
745 | */ | |
746 | int | |
747 | nfs_connect_error_class(int error) | |
748 | { | |
749 | switch (error) { | |
750 | case 0: | |
751 | return 0; | |
752 | case ETIMEDOUT: | |
753 | case EAGAIN: | |
754 | return 1; | |
755 | case EPIPE: | |
756 | case EADDRNOTAVAIL: | |
757 | case ENETDOWN: | |
758 | case ENETUNREACH: | |
759 | case ENETRESET: | |
760 | case ECONNABORTED: | |
761 | case ECONNRESET: | |
762 | case EISCONN: | |
763 | case ENOTCONN: | |
764 | case ESHUTDOWN: | |
765 | case ECONNREFUSED: | |
766 | case EHOSTDOWN: | |
767 | case EHOSTUNREACH: | |
768 | return 2; | |
769 | case ERPCMISMATCH: | |
770 | case EPROCUNAVAIL: | |
771 | case EPROGMISMATCH: | |
772 | case EPROGUNAVAIL: | |
773 | return 3; | |
774 | case EBADRPC: | |
775 | return 4; | |
776 | default: | |
777 | return 5; | |
778 | } | |
779 | } | |
780 | ||
781 | /* | |
782 | * Make sure a socket search returns the best error. | |
783 | */ | |
784 | void | |
785 | nfs_socket_search_update_error(struct nfs_socket_search *nss, int error) | |
786 | { | |
787 | if (nfs_connect_error_class(error) >= nfs_connect_error_class(nss->nss_error)) { | |
788 | nss->nss_error = error; | |
789 | } | |
790 | } | |
791 | ||
/* nfs_connect_search_new_socket:
 *	Given a socket search structure for an nfs mount try to find a new socket from the set of addresses specified
 *	by nss.
 *
 *	nss_last is set to -1 at initialization to indicate the first time. It's set to -2 if an address was found but
 *	could not be used or if a socket timed out.
 *
 *	Returns 0 when the loop completes (even if no new socket could be started),
 *	EINTR if the mount is being unmounted, or the error from nfs_socket_create().
 */
int
nfs_connect_search_new_socket(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
{
	struct nfs_fs_location *fsl;
	struct nfs_fs_server *fss;
	struct sockaddr_storage ss;
	struct nfs_socket *nso;
	char *addrstr;
	int error = 0;


	NFS_SOCK_DBG("nfs connect %s nss_addrcnt = %d\n",
	    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss->nss_addrcnt);

	/*
	 * while there are addresses and:
	 *	we have no sockets or
	 *	the last address failed and did not produce a socket (nss_last < 0) or
	 *	it's been a while (2 seconds) and we have less than the max number of concurrent sockets to search (4)
	 *	then attempt to create a socket with the current address.
	 */
	while (nss->nss_addrcnt > 0 && ((nss->nss_last < 0) || (nss->nss_sockcnt == 0) ||
	    ((nss->nss_sockcnt < 4) && (now->tv_sec >= (nss->nss_last + 2))))) {
		if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
			return EINTR;
		}
		/* Can we convert the address to a sockaddr? */
		fsl = nmp->nm_locations.nl_locations[nss->nss_nextloc.nli_loc];
		fss = fsl->nl_servers[nss->nss_nextloc.nli_serv];
		addrstr = fss->ns_addresses[nss->nss_nextloc.nli_addr];
		NFS_SOCK_DBG("Trying address %s for program %d on port %d\n", addrstr, nss->nss_protocol, nss->nss_port);
		if (*addrstr == '\0') {
			/*
			 * We have an unspecified local domain address. We use the program to translate to
			 * a well known local transport address. We only support PMAPPROG and NFS for this.
			 */
			if (nss->nss_protocol == PMAPPROG) {
				addrstr = (nss->nss_sotype == SOCK_DGRAM) ? RPCB_TICLTS_PATH : RPCB_TICOTSORD_PATH;
			} else if (nss->nss_protocol == NFS_PROG) {
				/* prefer the mount-supplied local port path, if any */
				addrstr = nmp->nm_nfs_localport;
				if (!addrstr || *addrstr == '\0') {
					addrstr = (nss->nss_sotype == SOCK_DGRAM) ? NFS_TICLTS_PATH : NFS_TICOTSORD_PATH;
				}
			}
			NFS_SOCK_DBG("Calling prog %d with <%s>\n", nss->nss_protocol, addrstr);
		}
		if (!nfs_uaddr2sockaddr(addrstr, (struct sockaddr*)&ss)) {
			NFS_SOCK_DBG("Could not convert address %s to socket\n", addrstr);
			/* advance past this unusable address; -2 forces another attempt next pass */
			nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
			nss->nss_addrcnt -= 1;
			nss->nss_last = -2;
			continue;
		}
		/* Check that socket family is acceptable. */
		if (nmp->nm_sofamily && (ss.ss_family != nmp->nm_sofamily)) {
			NFS_SOCK_DBG("Skipping socket family %d, want mount family %d\n", ss.ss_family, nmp->nm_sofamily);
			nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
			nss->nss_addrcnt -= 1;
			nss->nss_last = -2;
			continue;
		}

		/* Create the socket. */
		error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nss->nss_sotype,
		    nss->nss_port, nss->nss_protocol, nss->nss_version,
		    ((nss->nss_protocol == NFS_PROG) && NMFLAG(nmp, RESVPORT)), &nso);
		if (error) {
			return error;
		}

		nso->nso_location = nss->nss_nextloc;
		nso->nso_wake = nss;
		error = sock_setupcall(nso->nso_so, nfs_connect_upcall, nso);
		if (error) {
			/*
			 * Couldn't install the upcall: mark the socket dead but still
			 * queue it below so the reap pass destroys it and records the error.
			 */
			NFS_SOCK_DBG("sock_setupcall failed for socket %p setting nfs_connect_upcall error = %d\n", nso, error);
			lck_mtx_lock(&nso->nso_lock);
			nso->nso_error = error;
			nso->nso_flags |= NSO_DEAD;
			lck_mtx_unlock(&nso->nso_lock);
		}

		TAILQ_INSERT_TAIL(&nss->nss_socklist, nso, nso_link);
		nss->nss_sockcnt++;
		nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
		nss->nss_addrcnt -= 1;

		nss->nss_last = now->tv_sec;
	}

	/*
	 * If we've exhausted the addresses without recording a start time,
	 * set nss_last so nfs_connect_search_check() will still wait for replies.
	 */
	if (nss->nss_addrcnt == 0 && nss->nss_last < 0) {
		nss->nss_last = now->tv_sec;
	}

	return error;
}
894 | ||
/*
 * nfs_connect_search_socket_connect:	Connect an nfs socket nso for nfsmount nmp.
 *					If successful set the socket options for the socket as require from the mount.
 *
 * Assumes: nso->nso_lock is held on entry and return.
 *
 * Returns 1 if the socket is connected and set up, 0 if it is dead or the
 * connect is still in progress.
 */
int
nfs_connect_search_socket_connect(struct nfsmount *nmp, struct nfs_socket *nso, int verbose)
{
	int error;

	if ((nso->nso_sotype != SOCK_STREAM) && NMFLAG(nmp, NOCONNECT)) {
		/* no connection needed, just say it's already connected */
		NFS_SOCK_DBG("nfs connect %s UDP socket %p noconnect\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
		nso->nso_flags |= NSO_CONNECTED;
		nfs_socket_options(nmp, nso);
		return 1;   /* Socket is connected and setup */
	} else if (!(nso->nso_flags & NSO_CONNECTING)) {
		/* initiate the connection */
		nso->nso_flags |= NSO_CONNECTING;
		/* drop the lock across the connect call */
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DBG("nfs connect %s connecting socket %p %s\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso,
		    nso->nso_saddr->sa_family == AF_LOCAL ? ((struct sockaddr_un*)nso->nso_saddr)->sun_path : "");
		error = sock_connect(nso->nso_so, nso->nso_saddr, MSG_DONTWAIT);
		if (error) {
			NFS_SOCK_DBG("nfs connect %s connecting socket %p returned %d\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
		}
		lck_mtx_lock(&nso->nso_lock);
		/* EINPROGRESS just means the non-blocking connect hasn't completed yet */
		if (error && (error != EINPROGRESS)) {
			nso->nso_error = error;
			nso->nso_flags |= NSO_DEAD;
			return 0;
		}
	}
	if (nso->nso_flags & NSO_CONNECTING) {
		/* check the connection */
		if (sock_isconnected(nso->nso_so)) {
			NFS_SOCK_DBG("nfs connect %s socket %p is connected\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
			nso->nso_flags &= ~NSO_CONNECTING;
			nso->nso_flags |= NSO_CONNECTED;
			nfs_socket_options(nmp, nso);
			return 1;   /* Socket is connected and setup */
		} else {
			/* not connected yet; see if the socket has an error pending */
			int optlen = sizeof(error);
			error = 0;
			sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &error, &optlen);
			if (error) { /* we got an error on the socket */
				NFS_SOCK_DBG("nfs connect %s socket %p connection error %d\n",
				    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
				if (verbose) {
					printf("nfs connect socket error %d for %s\n",
					    error, vfs_statfs(nmp->nm_mountp)->f_mntfromname);
				}
				nso->nso_error = error;
				nso->nso_flags |= NSO_DEAD;
				return 0;
			}
		}
	}

	return 0;   /* Waiting to be connected */
}
961 | ||
/*
 * nfs_connect_search_ping:	Send a null proc on the nso socket.
 *
 * Returns 1 if the NULL RPC request was sent (the socket is now pinging),
 * 0 if the socket was marked dead because the request could not be built
 * or fully sent.
 *
 * Assumes: nso->nso_lock is held on entry and return (it is dropped
 * internally around the request build and the send).
 */
int
nfs_connect_search_ping(struct nfsmount *nmp, struct nfs_socket *nso, struct timeval *now)
{
	/* initiate a NULL RPC request */
	uint64_t xid = nso->nso_pingxid;
	mbuf_t m, mreq = NULL;
	struct msghdr msg;
	size_t reqlen, sentlen;
	uint32_t vers = nso->nso_version;
	int error;

	/* If no version has been negotiated yet, pick the protocol's default. */
	if (!vers) {
		if (nso->nso_protocol == PMAPPROG) {
			vers = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
		} else if (nso->nso_protocol == NFS_PROG) {
			vers = PVER2MAJOR(nmp->nm_max_vers);
		}
	}
	lck_mtx_unlock(&nso->nso_lock);
	NFS_SOCK_DBG("Pinging socket %p %d %d %d\n", nso, nso->nso_sotype, nso->nso_protocol, vers);
	error = nfsm_rpchead2(nmp, nso->nso_sotype, nso->nso_protocol, vers, 0, RPCAUTH_SYS,
	    vfs_context_ucred(vfs_context_kernel()), NULL, NULL, &xid, &mreq);
	lck_mtx_lock(&nso->nso_lock);
	if (!error) {
		nso->nso_flags |= NSO_PINGING;
		/* remember the xid so the upcall can match the reply */
		nso->nso_pingxid = R_XID32(xid);
		nso->nso_reqtimestamp = now->tv_sec;
		bzero(&msg, sizeof(msg));
		/* unconnected datagram sockets need an explicit destination address */
		if ((nso->nso_sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so)) {
			msg.msg_name = nso->nso_saddr;
			msg.msg_namelen = nso->nso_saddr->sa_len;
		}
		/* total up the request length so we can detect a partial send */
		for (reqlen = 0, m = mreq; m; m = mbuf_next(m)) {
			reqlen += mbuf_len(m);
		}
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DUMP_MBUF("Sending ping packet", mreq);
		error = sock_sendmbuf(nso->nso_so, &msg, mreq, 0, &sentlen);
		NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
		lck_mtx_lock(&nso->nso_lock);
		/* a partial send is treated like a timeout */
		if (!error && (sentlen != reqlen)) {
			error = ETIMEDOUT;
		}
	}
	if (error) {
		nso->nso_error = error;
		nso->nso_flags |= NSO_DEAD;
		return 0;
	}

	return 1;
}
1018 | ||
1019 | /* | |
1020 | * nfs_connect_search_socket_found: Take the found socket of the socket search list and assign it to the searched socket. | |
1021 | * Set the nfs socket protocol and version if needed. | |
1022 | */ | |
1023 | void | |
1024 | nfs_connect_search_socket_found(struct nfsmount *nmp, struct nfs_socket_search *nss, struct nfs_socket *nso) | |
1025 | { | |
1026 | NFS_SOCK_DBG("nfs connect %s socket %p verified\n", | |
1027 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); | |
1028 | if (!nso->nso_version) { | |
1029 | /* If the version isn't set, the default must have worked. */ | |
1030 | if (nso->nso_protocol == PMAPPROG) { | |
1031 | nso->nso_version = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4; | |
1032 | } | |
1033 | if (nso->nso_protocol == NFS_PROG) { | |
1034 | nso->nso_version = PVER2MAJOR(nmp->nm_max_vers); | |
1035 | } | |
1036 | } | |
1037 | TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link); | |
1038 | nss->nss_sockcnt--; | |
1039 | nss->nss_sock = nso; | |
1040 | } | |
1041 | ||
/*
 * nfs_connect_search_socket_reap:	For each socket in the search list mark any timed out socket as dead and remove from
 *					the list. Dead sockets are then destroyed.
 */
void
nfs_connect_search_socket_reap(struct nfsmount *nmp __unused, struct nfs_socket_search *nss, struct timeval *now)
{
	/* NOTE: nmp is only referenced via NFS_SOCK_DBG here, hence __unused. */
	struct nfs_socket *nso, *nsonext;

	TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
		lck_mtx_lock(&nso->nso_lock);
		if (now->tv_sec >= (nso->nso_timestamp + nss->nss_timeo)) {
			/* took too long */
			NFS_SOCK_DBG("nfs connect %s socket %p timed out\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
			nso->nso_error = ETIMEDOUT;
			nso->nso_flags |= NSO_DEAD;
		}
		if (!(nso->nso_flags & NSO_DEAD)) {
			/* still alive; leave it on the list */
			lck_mtx_unlock(&nso->nso_lock);
			continue;
		}
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DBG("nfs connect %s reaping socket %p error = %d flags = %8.8x\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, nso->nso_error, nso->nso_flags);
		/* preserve the most interesting error seen so far */
		nfs_socket_search_update_error(nss, nso->nso_error);
		TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
		nss->nss_sockcnt--;
		nfs_socket_destroy(nso);
		/* If there are more sockets to try, force the starting of another socket */
		if (nss->nss_addrcnt > 0) {
			nss->nss_last = -2;
		}
	}
}
1077 | ||
/*
 * nfs_connect_search_check:	Check on the status of search and wait for replies if needed.
 *
 * Returns EINTR if the mount is being unmounted, the error from nfs_sigintr()
 * if the search was interrupted, and 0 otherwise (possibly after sleeping).
 */
int
nfs_connect_search_check(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
{
	int error;

	/* log a warning if connect is taking a while */
	if (((now->tv_sec - nss->nss_timestamp) >= 8) && ((nss->nss_flags & (NSS_VERBOSE | NSS_WARNED)) == NSS_VERBOSE)) {
		printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
		/* only warn once per search */
		nss->nss_flags |= NSS_WARNED;
	}
	if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
		return EINTR;
	}
	if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) {
		return error;
	}

	/* If we were successful at sending a ping, wait up to a second for a reply */
	if (nss->nss_last >= 0) {
		tsleep(nss, PSOCK, "nfs_connect_search_wait", hz);
	}

	return 0;
}
1105 | ||
1106 | ||
/*
 * Continue the socket search until we have something to report.
 *
 * Each pass: start new candidate sockets if needed, push each in-flight
 * socket forward (connect, then ping), claim the first verified one, and
 * reap any dead/timed-out sockets. Returns 0 when the search is done
 * (check nss->nss_sock for the winner) or an error if interrupted.
 */
int
nfs_connect_search_loop(struct nfsmount *nmp, struct nfs_socket_search *nss)
{
	struct nfs_socket *nso;
	struct timeval now;
	int error;
	int verbose = (nss->nss_flags & NSS_VERBOSE);

loop:
	microuptime(&now);
	NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, now.tv_sec);

	/* add a new socket to the socket list if needed and available */
	error = nfs_connect_search_new_socket(nmp, nss, &now);
	if (error) {
		NFS_SOCK_DBG("nfs connect returned %d\n", error);
		return error;
	}

	/* check each active socket on the list and try to push it along */
	TAILQ_FOREACH(nso, &nss->nss_socklist, nso_link) {
		lck_mtx_lock(&nso->nso_lock);

		/* If not connected connect it */
		if (!(nso->nso_flags & NSO_CONNECTED)) {
			if (!nfs_connect_search_socket_connect(nmp, nso, verbose)) {
				/* dead or still connecting; move on to the next socket */
				lck_mtx_unlock(&nso->nso_lock);
				continue;
			}
		}

		/* If the socket hasn't been verified or in a ping, ping it. We also handle UDP retransmits */
		if (!(nso->nso_flags & (NSO_PINGING | NSO_VERIFIED)) ||
		    ((nso->nso_sotype == SOCK_DGRAM) && (now.tv_sec >= nso->nso_reqtimestamp + 2))) {
			if (!nfs_connect_search_ping(nmp, nso, &now)) {
				/* ping failed and marked the socket dead; move on */
				lck_mtx_unlock(&nso->nso_lock);
				continue;
			}
		}

		/* Has the socket been verified by the up call routine? */
		if (nso->nso_flags & NSO_VERIFIED) {
			/* WOOHOO!! This socket looks good! */
			nfs_connect_search_socket_found(nmp, nss, nso);
			lck_mtx_unlock(&nso->nso_lock);
			break;
		}
		lck_mtx_unlock(&nso->nso_lock);
	}

	/* Check for timed out sockets and mark as dead and then remove all dead sockets. */
	nfs_connect_search_socket_reap(nmp, nss, &now);

	/*
	 * Keep looping if we haven't found a socket yet and we have more
	 * sockets to (continue to) try.
	 */
	error = 0;
	if (!nss->nss_sock && (!TAILQ_EMPTY(&nss->nss_socklist) || nss->nss_addrcnt)) {
		/* wait for progress (or interruption), then search again */
		error = nfs_connect_search_check(nmp, nss, &now);
		if (!error) {
			goto loop;
		}
	}

	NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
	return error;
}
1178 | ||
1179 | /* | |
1180 | * Initialize a new NFS connection. | |
1181 | * | |
1182 | * Search for a location to connect a socket to and initialize the connection. | |
1183 | * | |
1184 | * An NFS mount may have multiple locations/servers/addresses available. | |
1185 | * We attempt to connect to each one asynchronously and will start | |
1186 | * several sockets in parallel if other locations are slow to answer. | |
1187 | * We'll use the first NFS socket we can successfully set up. | |
1188 | * | |
1189 | * The search may involve contacting the portmapper service first. | |
1190 | * | |
1191 | * A mount's initial connection may require negotiating some parameters such | |
1192 | * as socket type and NFS version. | |
1193 | */ | |
1194 | ||
1195 | int | |
1196 | nfs_connect(struct nfsmount *nmp, int verbose, int timeo) | |
1197 | { | |
1198 | struct nfs_socket_search nss; | |
1199 | struct nfs_socket *nso, *nsonfs; | |
1200 | struct sockaddr_storage ss; | |
1201 | struct sockaddr *saddr, *oldsaddr; | |
1202 | sock_upcall upcall; | |
1203 | #if CONFIG_NFS4 | |
1204 | struct timeval now; | |
1205 | #endif | |
1206 | struct timeval start; | |
1207 | int error, savederror, nfsvers; | |
1208 | int tryv4 = 1; | |
1209 | uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM; | |
1210 | fhandle_t *fh = NULL; | |
1211 | char *path = NULL; | |
1212 | in_port_t port; | |
1213 | int addrtotal = 0; | |
1214 | ||
1215 | /* paranoia... check that we have at least one address in the locations */ | |
1216 | uint32_t loc, serv; | |
1217 | for (loc = 0; loc < nmp->nm_locations.nl_numlocs; loc++) { | |
1218 | for (serv = 0; serv < nmp->nm_locations.nl_locations[loc]->nl_servcount; serv++) { | |
1219 | addrtotal += nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount; | |
1220 | if (nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount == 0) { | |
1221 | NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n", | |
1222 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, | |
1223 | nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name); | |
1224 | } | |
1225 | } | |
1226 | } | |
1227 | ||
1228 | if (addrtotal == 0) { | |
1229 | NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n", | |
1230 | vfs_statfs(nmp->nm_mountp)->f_mntfromname); | |
1231 | return EINVAL; | |
1232 | } else { | |
1233 | NFS_SOCK_DBG("nfs connect %s has %d addresses\n", | |
1234 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, addrtotal); | |
1235 | } | |
1236 | ||
1237 | lck_mtx_lock(&nmp->nm_lock); | |
1238 | nmp->nm_sockflags |= NMSOCK_CONNECTING; | |
1239 | nmp->nm_nss = &nss; | |
1240 | lck_mtx_unlock(&nmp->nm_lock); | |
1241 | microuptime(&start); | |
1242 | savederror = error = 0; | |
1243 | ||
1244 | tryagain: | |
1245 | /* initialize socket search state */ | |
1246 | bzero(&nss, sizeof(nss)); | |
1247 | nss.nss_addrcnt = addrtotal; | |
1248 | nss.nss_error = savederror; | |
1249 | TAILQ_INIT(&nss.nss_socklist); | |
1250 | nss.nss_sotype = sotype; | |
1251 | nss.nss_startloc = nmp->nm_locations.nl_current; | |
1252 | nss.nss_timestamp = start.tv_sec; | |
1253 | nss.nss_timeo = timeo; | |
1254 | if (verbose) { | |
1255 | nss.nss_flags |= NSS_VERBOSE; | |
1256 | } | |
1257 | ||
1258 | /* First time connecting, we may need to negotiate some things */ | |
1259 | if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) { | |
1260 | NFS_SOCK_DBG("so_family = %d\n", nmp->nm_sofamily); | |
1261 | NFS_SOCK_DBG("nfs port = %d local: <%s>\n", nmp->nm_nfsport, nmp->nm_nfs_localport ? nmp->nm_nfs_localport : ""); | |
1262 | NFS_SOCK_DBG("mount port = %d local: <%s>\n", nmp->nm_mountport, nmp->nm_mount_localport ? nmp->nm_mount_localport : ""); | |
1263 | if (!nmp->nm_vers) { | |
1264 | /* No NFS version specified... */ | |
1265 | if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) { | |
1266 | #if CONFIG_NFS4 | |
1267 | if (PVER2MAJOR(nmp->nm_max_vers) >= NFS_VER4 && tryv4) { | |
1268 | nss.nss_port = NFS_PORT; | |
1269 | nss.nss_protocol = NFS_PROG; | |
1270 | nss.nss_version = 4; | |
1271 | nss.nss_flags |= NSS_FALLBACK2PMAP; | |
1272 | } else { | |
1273 | #endif | |
1274 | /* ...connect to portmapper first if we (may) need any ports. */ | |
1275 | nss.nss_port = PMAPPORT; | |
1276 | nss.nss_protocol = PMAPPROG; | |
1277 | nss.nss_version = 0; | |
1278 | #if CONFIG_NFS4 | |
1279 | } | |
1280 | #endif | |
1281 | } else { | |
1282 | /* ...connect to NFS port first. */ | |
1283 | nss.nss_port = nmp->nm_nfsport; | |
1284 | nss.nss_protocol = NFS_PROG; | |
1285 | nss.nss_version = 0; | |
1286 | } | |
1287 | #if CONFIG_NFS4 | |
1288 | } else if (nmp->nm_vers >= NFS_VER4) { | |
1289 | if (tryv4) { | |
1290 | /* For NFSv4, we use the given (or default) port. */ | |
1291 | nss.nss_port = nmp->nm_nfsport ? nmp->nm_nfsport : NFS_PORT; | |
1292 | nss.nss_protocol = NFS_PROG; | |
1293 | nss.nss_version = 4; | |
1294 | /* | |
1295 | * set NSS_FALLBACK2PMAP here to pick up any non standard port | |
1296 | * if no port is specified on the mount; | |
1297 | * Note nm_vers is set so we will only try NFS_VER4. | |
1298 | */ | |
1299 | if (!nmp->nm_nfsport) { | |
1300 | nss.nss_flags |= NSS_FALLBACK2PMAP; | |
1301 | } | |
1302 | } else { | |
1303 | nss.nss_port = PMAPPORT; | |
1304 | nss.nss_protocol = PMAPPROG; | |
1305 | nss.nss_version = 0; | |
1306 | } | |
1307 | #endif | |
1308 | } else { | |
1309 | /* For NFSv3/v2... */ | |
1310 | if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) { | |
1311 | /* ...connect to portmapper first if we need any ports. */ | |
1312 | nss.nss_port = PMAPPORT; | |
1313 | nss.nss_protocol = PMAPPROG; | |
1314 | nss.nss_version = 0; | |
1315 | } else { | |
1316 | /* ...connect to NFS port first. */ | |
1317 | nss.nss_port = nmp->nm_nfsport; | |
1318 | nss.nss_protocol = NFS_PROG; | |
1319 | nss.nss_version = nmp->nm_vers; | |
1320 | } | |
1321 | } | |
1322 | NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n", | |
1323 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port, | |
1324 | nss.nss_protocol, nss.nss_version); | |
1325 | } else { | |
1326 | /* we've connected before, just connect to NFS port */ | |
1327 | if (!nmp->nm_nfsport) { | |
1328 | /* need to ask portmapper which port that would be */ | |
1329 | nss.nss_port = PMAPPORT; | |
1330 | nss.nss_protocol = PMAPPROG; | |
1331 | nss.nss_version = 0; | |
1332 | } else { | |
1333 | nss.nss_port = nmp->nm_nfsport; | |
1334 | nss.nss_protocol = NFS_PROG; | |
1335 | nss.nss_version = nmp->nm_vers; | |
1336 | } | |
1337 | NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n", | |
1338 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port, | |
1339 | nss.nss_protocol, nss.nss_version); | |
1340 | } | |
1341 | ||
1342 | /* Set next location to first valid location. */ | |
1343 | /* If start location is invalid, find next location. */ | |
1344 | nss.nss_nextloc = nss.nss_startloc; | |
1345 | if ((nss.nss_nextloc.nli_serv >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servcount) || | |
1346 | (nss.nss_nextloc.nli_addr >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servers[nss.nss_nextloc.nli_serv]->ns_addrcount)) { | |
1347 | nfs_location_next(&nmp->nm_locations, &nss.nss_nextloc); | |
1348 | if (!nfs_location_index_cmp(&nss.nss_nextloc, &nss.nss_startloc)) { | |
1349 | NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n", | |
1350 | vfs_statfs(nmp->nm_mountp)->f_mntfromname); | |
1351 | return ENOENT; | |
1352 | } | |
1353 | } | |
1354 | nss.nss_last = -1; | |
1355 | ||
1356 | keepsearching: | |
1357 | ||
1358 | error = nfs_connect_search_loop(nmp, &nss); | |
1359 | if (error || !nss.nss_sock) { | |
1360 | /* search failed */ | |
1361 | nfs_socket_search_cleanup(&nss); | |
1362 | if (nss.nss_flags & NSS_FALLBACK2PMAP) { | |
1363 | tryv4 = 0; | |
1364 | NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n", | |
1365 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error); | |
1366 | goto tryagain; | |
1367 | } | |
1368 | ||
1369 | if (!error && (nss.nss_sotype == SOCK_STREAM) && !nmp->nm_sotype && (nmp->nm_vers < NFS_VER4)) { | |
1370 | /* Try using UDP */ | |
1371 | sotype = SOCK_DGRAM; | |
1372 | savederror = nss.nss_error; | |
1373 | NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n", | |
1374 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error); | |
1375 | goto tryagain; | |
1376 | } | |
1377 | if (!error) { | |
1378 | error = nss.nss_error ? nss.nss_error : ETIMEDOUT; | |
1379 | } | |
1380 | lck_mtx_lock(&nmp->nm_lock); | |
1381 | nmp->nm_sockflags &= ~NMSOCK_CONNECTING; | |
1382 | nmp->nm_nss = NULL; | |
1383 | lck_mtx_unlock(&nmp->nm_lock); | |
1384 | if (nss.nss_flags & NSS_WARNED) { | |
1385 | log(LOG_INFO, "nfs_connect: socket connect aborted for %s\n", | |
1386 | vfs_statfs(nmp->nm_mountp)->f_mntfromname); | |
1387 | } | |
1388 | if (fh) { | |
1389 | FREE(fh, M_TEMP); | |
1390 | } | |
1391 | if (path) { | |
1392 | FREE_ZONE(path, MAXPATHLEN, M_NAMEI); | |
1393 | } | |
1394 | NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n", | |
1395 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, error); | |
1396 | return error; | |
1397 | } | |
1398 | ||
1399 | /* try to use nss_sock */ | |
1400 | nso = nss.nss_sock; | |
1401 | nss.nss_sock = NULL; | |
1402 | ||
1403 | /* We may be speaking to portmap first... to determine port(s). */ | |
1404 | if (nso->nso_saddr->sa_family == AF_INET) { | |
1405 | port = ntohs(((struct sockaddr_in*)nso->nso_saddr)->sin_port); | |
1406 | } else if (nso->nso_saddr->sa_family == AF_INET6) { | |
1407 | port = ntohs(((struct sockaddr_in6*)nso->nso_saddr)->sin6_port); | |
1408 | } else if (nso->nso_saddr->sa_family == AF_LOCAL) { | |
1409 | if (nso->nso_protocol == PMAPPROG) { | |
1410 | port = PMAPPORT; | |
1411 | } | |
1412 | } | |
1413 | ||
1414 | if (port == PMAPPORT) { | |
1415 | /* Use this portmapper port to get the port #s we need. */ | |
1416 | NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n", | |
1417 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); | |
1418 | ||
1419 | /* remove the connect upcall so nfs_portmap_lookup() can use this socket */ | |
1420 | sock_setupcall(nso->nso_so, NULL, NULL); | |
1421 | ||
1422 | /* Set up socket address and port for NFS socket. */ | |
1423 | bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len); | |
1424 | ||
1425 | /* If NFS version not set, try nm_max_vers down to nm_min_vers */ | |
1426 | nfsvers = nmp->nm_vers ? nmp->nm_vers : PVER2MAJOR(nmp->nm_max_vers); | |
1427 | if (!(port = nmp->nm_nfsport)) { | |
1428 | if (ss.ss_family == AF_INET) { | |
1429 | ((struct sockaddr_in*)&ss)->sin_port = htons(0); | |
1430 | } else if (ss.ss_family == AF_INET6) { | |
1431 | ((struct sockaddr_in6*)&ss)->sin6_port = htons(0); | |
1432 | } else if (ss.ss_family == AF_LOCAL) { | |
1433 | if (((struct sockaddr_un*)&ss)->sun_path[0] == '/') { | |
1434 | NFS_SOCK_DBG("Looking up NFS socket over %s\n", ((struct sockaddr_un*)&ss)->sun_path); | |
1435 | } | |
1436 | } | |
1437 | for (; nfsvers >= (int)PVER2MAJOR(nmp->nm_min_vers); nfsvers--) { | |
1438 | if (nmp->nm_vers && nmp->nm_vers != nfsvers) { | |
1439 | continue; /* Wrong version */ | |
1440 | } | |
1441 | #if CONFIG_NFS4 | |
1442 | if (nfsvers == NFS_VER4 && nso->nso_sotype == SOCK_DGRAM) { | |
1443 | continue; /* NFSv4 does not do UDP */ | |
1444 | } | |
1445 | #endif | |
1446 | if (ss.ss_family == AF_LOCAL && nmp->nm_nfs_localport) { | |
1447 | struct sockaddr_un *sun = (struct sockaddr_un *)&ss; | |
1448 | NFS_SOCK_DBG("Using supplied local address %s for NFS_PROG\n", nmp->nm_nfs_localport); | |
1449 | strlcpy(sun->sun_path, nmp->nm_nfs_localport, sizeof(sun->sun_path)); | |
1450 | error = 0; | |
1451 | } else { | |
1452 | NFS_SOCK_DBG("Calling Portmap/Rpcbind for NFS_PROG"); | |
1453 | error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss, | |
1454 | nso->nso_so, NFS_PROG, nfsvers, nso->nso_sotype, timeo); | |
1455 | } | |
1456 | if (!error) { | |
1457 | if (ss.ss_family == AF_INET) { | |
1458 | port = ntohs(((struct sockaddr_in*)&ss)->sin_port); | |
1459 | } else if (ss.ss_family == AF_INET6) { | |
1460 | port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port); | |
1461 | } else if (ss.ss_family == AF_LOCAL) { | |
1462 | port = ((struct sockaddr_un *)&ss)->sun_path[0] ? NFS_PORT : 0; | |
1463 | } | |
1464 | if (!port) { | |
1465 | error = EPROGUNAVAIL; | |
1466 | } | |
1467 | #if CONFIG_NFS4 | |
1468 | if (port == NFS_PORT && nfsvers == NFS_VER4 && tryv4 == 0) { | |
1469 | continue; /* We already tried this */ | |
1470 | } | |
1471 | #endif | |
1472 | } | |
1473 | if (!error) { | |
1474 | break; | |
1475 | } | |
1476 | } | |
1477 | if (nfsvers < (int)PVER2MAJOR(nmp->nm_min_vers) && error == 0) { | |
1478 | error = EPROGUNAVAIL; | |
1479 | } | |
1480 | if (error) { | |
1481 | nfs_socket_search_update_error(&nss, error); | |
1482 | nfs_socket_destroy(nso); | |
1483 | NFS_SOCK_DBG("Could not lookup NFS socket address for version %d error = %d\n", nfsvers, error); | |
1484 | goto keepsearching; | |
1485 | } | |
1486 | } else if (nmp->nm_nfs_localport) { | |
1487 | strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_nfs_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path)); | |
1488 | NFS_SOCK_DBG("Using supplied nfs_local_port %s for NFS_PROG\n", nmp->nm_nfs_localport); | |
1489 | } | |
1490 | ||
1491 | /* Create NFS protocol socket and add it to the list of sockets. */ | |
1492 | /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non standard port */ | |
1493 | if (ss.ss_family == AF_LOCAL) { | |
1494 | NFS_SOCK_DBG("Creating NFS socket for %s port = %d\n", ((struct sockaddr_un*)&ss)->sun_path, port); | |
1495 | } | |
1496 | error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nso->nso_sotype, port, | |
1497 | NFS_PROG, nfsvers, NMFLAG(nmp, RESVPORT), &nsonfs); | |
1498 | if (error) { | |
1499 | nfs_socket_search_update_error(&nss, error); | |
1500 | nfs_socket_destroy(nso); | |
1501 | NFS_SOCK_DBG("Could not create NFS socket: %d\n", error); | |
1502 | goto keepsearching; | |
1503 | } | |
1504 | nsonfs->nso_location = nso->nso_location; | |
1505 | nsonfs->nso_wake = &nss; | |
1506 | error = sock_setupcall(nsonfs->nso_so, nfs_connect_upcall, nsonfs); | |
1507 | if (error) { | |
1508 | nfs_socket_search_update_error(&nss, error); | |
1509 | nfs_socket_destroy(nsonfs); | |
1510 | nfs_socket_destroy(nso); | |
1511 | NFS_SOCK_DBG("Could not nfs_connect_upcall: %d", error); | |
1512 | goto keepsearching; | |
1513 | } | |
1514 | TAILQ_INSERT_TAIL(&nss.nss_socklist, nsonfs, nso_link); | |
1515 | nss.nss_sockcnt++; | |
1516 | if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) { | |
1517 | /* Set up socket address and port for MOUNT socket. */ | |
1518 | error = 0; | |
1519 | bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len); | |
1520 | port = nmp->nm_mountport; | |
1521 | NFS_SOCK_DBG("mount port = %d\n", port); | |
1522 | if (ss.ss_family == AF_INET) { | |
1523 | ((struct sockaddr_in*)&ss)->sin_port = htons(port); | |
1524 | } else if (ss.ss_family == AF_INET6) { | |
1525 | ((struct sockaddr_in6*)&ss)->sin6_port = htons(port); | |
1526 | } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) { | |
1527 | NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport); | |
1528 | strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path)); | |
1529 | } | |
1530 | if (!port) { | |
1531 | /* Get port/sockaddr for MOUNT version corresponding to NFS version. */ | |
1532 | /* If NFS version is unknown, optimistically choose for NFSv3. */ | |
1533 | int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3; | |
1534 | int mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nso->nso_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP; | |
1535 | NFS_SOCK_DBG("Looking up mount port with socket %p\n", nso->nso_so); | |
1536 | error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss, | |
1537 | nso->nso_so, RPCPROG_MNT, mntvers, mntproto == IPPROTO_UDP ? SOCK_DGRAM : SOCK_STREAM, timeo); | |
1538 | } | |
1539 | if (!error) { | |
1540 | if (ss.ss_family == AF_INET) { | |
1541 | port = ntohs(((struct sockaddr_in*)&ss)->sin_port); | |
1542 | } else if (ss.ss_family == AF_INET6) { | |
1543 | port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port); | |
1544 | } else if (ss.ss_family == AF_LOCAL) { | |
1545 | port = (((struct sockaddr_un*)&ss)->sun_path[0] != '\0'); | |
1546 | } | |
1547 | if (!port) { | |
1548 | error = EPROGUNAVAIL; | |
1549 | } | |
1550 | } | |
1551 | /* create sockaddr for MOUNT */ | |
1552 | if (!error) { | |
1553 | MALLOC(nsonfs->nso_saddr2, struct sockaddr *, ss.ss_len, M_SONAME, M_WAITOK | M_ZERO); | |
1554 | } | |
1555 | if (!error && !nsonfs->nso_saddr2) { | |
1556 | error = ENOMEM; | |
1557 | } | |
1558 | if (!error) { | |
1559 | bcopy(&ss, nsonfs->nso_saddr2, ss.ss_len); | |
1560 | } | |
1561 | if (error) { | |
1562 | NFS_SOCK_DBG("Could not create mount sockaet address %d", error); | |
1563 | lck_mtx_lock(&nsonfs->nso_lock); | |
1564 | nsonfs->nso_error = error; | |
1565 | nsonfs->nso_flags |= NSO_DEAD; | |
1566 | lck_mtx_unlock(&nsonfs->nso_lock); | |
1567 | } | |
1568 | } | |
1569 | NFS_SOCK_DBG("Destroying socket %p so %p\n", nso, nso->nso_so); | |
1570 | nfs_socket_destroy(nso); | |
1571 | goto keepsearching; | |
1572 | } | |
1573 | ||
1574 | /* nso is an NFS socket */ | |
1575 | NFS_SOCK_DBG("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); | |
1576 | ||
1577 | /* If NFS version wasn't specified, it was determined during the connect. */ | |
1578 | nfsvers = nmp->nm_vers ? nmp->nm_vers : (int)nso->nso_version; | |
1579 | ||
1580 | /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */ | |
1581 | if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) { | |
1582 | error = 0; | |
1583 | saddr = nso->nso_saddr2; | |
1584 | if (!saddr) { | |
1585 | /* Need sockaddr for MOUNT port */ | |
1586 | NFS_SOCK_DBG("Getting mount address mountport = %d, mount_localport = %s\n", nmp->nm_mountport, nmp->nm_mount_localport); | |
1587 | bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len); | |
1588 | port = nmp->nm_mountport; | |
1589 | if (ss.ss_family == AF_INET) { | |
1590 | ((struct sockaddr_in*)&ss)->sin_port = htons(port); | |
1591 | } else if (ss.ss_family == AF_INET6) { | |
1592 | ((struct sockaddr_in6*)&ss)->sin6_port = htons(port); | |
1593 | } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) { | |
1594 | NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport); | |
1595 | strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path)); | |
1596 | } | |
1597 | if (!port) { | |
1598 | /* Get port/sockaddr for MOUNT version corresponding to NFS version. */ | |
1599 | int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3; | |
1600 | int so_type = NM_OMFLAG(nmp, MNTUDP) ? SOCK_DGRAM : nso->nso_sotype; | |
1601 | error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss, | |
1602 | NULL, RPCPROG_MNT, mntvers, so_type, timeo); | |
1603 | if (ss.ss_family == AF_INET) { | |
1604 | port = ntohs(((struct sockaddr_in*)&ss)->sin_port); | |
1605 | } else if (ss.ss_family == AF_INET6) { | |
1606 | port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port); | |
1607 | } | |
1608 | } | |
1609 | if (!error) { | |
1610 | if (port) { | |
1611 | saddr = (struct sockaddr*)&ss; | |
1612 | } else { | |
1613 | error = EPROGUNAVAIL; | |
1614 | } | |
1615 | } | |
1616 | } | |
1617 | if (saddr) { | |
1618 | MALLOC(fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK | M_ZERO); | |
1619 | } | |
1620 | if (saddr && fh) { | |
1621 | MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); | |
1622 | } | |
1623 | if (!saddr || !fh || !path) { | |
1624 | if (!error) { | |
1625 | error = ENOMEM; | |
1626 | } | |
1627 | if (fh) { | |
1628 | FREE(fh, M_TEMP); | |
1629 | } | |
1630 | if (path) { | |
1631 | FREE_ZONE(path, MAXPATHLEN, M_NAMEI); | |
1632 | } | |
1633 | fh = NULL; | |
1634 | path = NULL; | |
1635 | nfs_socket_search_update_error(&nss, error); | |
1636 | nfs_socket_destroy(nso); | |
1637 | goto keepsearching; | |
1638 | } | |
1639 | nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location, path, MAXPATHLEN, 1); | |
1640 | error = nfs3_mount_rpc(nmp, saddr, nso->nso_sotype, nfsvers, | |
1641 | path, vfs_context_current(), timeo, fh, &nmp->nm_servsec); | |
1642 | NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n", | |
1643 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error); | |
1644 | if (!error) { | |
1645 | /* Make sure we can agree on a security flavor. */ | |
1646 | int o, s; /* indices into mount option and server security flavor lists */ | |
1647 | int found = 0; | |
1648 | ||
1649 | if ((nfsvers == NFS_VER3) && !nmp->nm_servsec.count) { | |
1650 | /* Some servers return an empty list to indicate RPCAUTH_SYS? */ | |
1651 | nmp->nm_servsec.count = 1; | |
1652 | nmp->nm_servsec.flavors[0] = RPCAUTH_SYS; | |
1653 | } | |
1654 | if (nmp->nm_sec.count) { | |
1655 | /* Choose the first flavor in our list that the server supports. */ | |
1656 | if (!nmp->nm_servsec.count) { | |
1657 | /* we don't know what the server supports, just use our first choice */ | |
1658 | nmp->nm_auth = nmp->nm_sec.flavors[0]; | |
1659 | found = 1; | |
1660 | } | |
1661 | for (o = 0; !found && (o < nmp->nm_sec.count); o++) { | |
1662 | for (s = 0; !found && (s < nmp->nm_servsec.count); s++) { | |
1663 | if (nmp->nm_sec.flavors[o] == nmp->nm_servsec.flavors[s]) { | |
1664 | nmp->nm_auth = nmp->nm_sec.flavors[o]; | |
1665 | found = 1; | |
1666 | } | |
1667 | } | |
1668 | } | |
1669 | } else { | |
1670 | /* Choose the first one we support from the server's list. */ | |
1671 | if (!nmp->nm_servsec.count) { | |
1672 | nmp->nm_auth = RPCAUTH_SYS; | |
1673 | found = 1; | |
1674 | } | |
1675 | for (s = 0; s < nmp->nm_servsec.count; s++) { | |
1676 | switch (nmp->nm_servsec.flavors[s]) { | |
1677 | case RPCAUTH_SYS: | |
1678 | /* prefer RPCAUTH_SYS to RPCAUTH_NONE */ | |
1679 | if (found && (nmp->nm_auth == RPCAUTH_NONE)) { | |
1680 | found = 0; | |
1681 | } | |
1682 | case RPCAUTH_NONE: | |
1683 | case RPCAUTH_KRB5: | |
1684 | case RPCAUTH_KRB5I: | |
1685 | case RPCAUTH_KRB5P: | |
1686 | if (!found) { | |
1687 | nmp->nm_auth = nmp->nm_servsec.flavors[s]; | |
1688 | found = 1; | |
1689 | } | |
1690 | break; | |
1691 | } | |
1692 | } | |
1693 | } | |
1694 | error = !found ? EAUTH : 0; | |
1695 | } | |
1696 | FREE_ZONE(path, MAXPATHLEN, M_NAMEI); | |
1697 | path = NULL; | |
1698 | if (error) { | |
1699 | nfs_socket_search_update_error(&nss, error); | |
1700 | FREE(fh, M_TEMP); | |
1701 | fh = NULL; | |
1702 | nfs_socket_destroy(nso); | |
1703 | goto keepsearching; | |
1704 | } | |
1705 | if (nmp->nm_fh) { | |
1706 | FREE(nmp->nm_fh, M_TEMP); | |
1707 | } | |
1708 | nmp->nm_fh = fh; | |
1709 | fh = NULL; | |
1710 | NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_CALLUMNT); | |
1711 | } | |
1712 | ||
1713 | /* put the real upcall in place */ | |
1714 | upcall = (nso->nso_sotype == SOCK_STREAM) ? nfs_tcp_rcv : nfs_udp_rcv; | |
1715 | error = sock_setupcall(nso->nso_so, upcall, nmp); | |
1716 | if (error) { | |
1717 | nfs_socket_search_update_error(&nss, error); | |
1718 | nfs_socket_destroy(nso); | |
1719 | goto keepsearching; | |
1720 | } | |
1721 | ||
1722 | if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) { | |
1723 | /* set mntfromname to this location */ | |
1724 | if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) { | |
1725 | nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location, | |
1726 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, | |
1727 | sizeof(vfs_statfs(nmp->nm_mountp)->f_mntfromname), 0); | |
1728 | } | |
1729 | /* some negotiated values need to remain unchanged for the life of the mount */ | |
1730 | if (!nmp->nm_sotype) { | |
1731 | nmp->nm_sotype = nso->nso_sotype; | |
1732 | } | |
1733 | if (!nmp->nm_vers) { | |
1734 | nmp->nm_vers = nfsvers; | |
1735 | #if CONFIG_NFS4 | |
1736 | /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */ | |
1737 | if ((nfsvers >= NFS_VER4) && !NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) { | |
1738 | if (nso->nso_saddr->sa_family == AF_INET) { | |
1739 | port = ((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port); | |
1740 | } else if (nso->nso_saddr->sa_family == AF_INET6) { | |
1741 | port = ((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port); | |
1742 | } else { | |
1743 | port = 0; | |
1744 | } | |
1745 | if (port == NFS_PORT) { | |
1746 | nmp->nm_nfsport = NFS_PORT; | |
1747 | } | |
1748 | } | |
1749 | #endif | |
1750 | } | |
1751 | #if CONFIG_NFS4 | |
1752 | /* do some version-specific pre-mount set up */ | |
1753 | if (nmp->nm_vers >= NFS_VER4) { | |
1754 | microtime(&now); | |
1755 | nmp->nm_mounttime = ((uint64_t)now.tv_sec << 32) | now.tv_usec; | |
1756 | if (!NMFLAG(nmp, NOCALLBACK)) { | |
1757 | nfs4_mount_callback_setup(nmp); | |
1758 | } | |
1759 | } | |
1760 | #endif | |
1761 | } | |
1762 | ||
1763 | /* Initialize NFS socket state variables */ | |
1764 | lck_mtx_lock(&nmp->nm_lock); | |
1765 | nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] = | |
1766 | nmp->nm_srtt[3] = (NFS_TIMEO << 3); | |
1767 | nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] = | |
1768 | nmp->nm_sdrtt[3] = 0; | |
1769 | if (nso->nso_sotype == SOCK_DGRAM) { | |
1770 | nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */ | |
1771 | nmp->nm_sent = 0; | |
1772 | } else if (nso->nso_sotype == SOCK_STREAM) { | |
1773 | nmp->nm_timeouts = 0; | |
1774 | } | |
1775 | nmp->nm_sockflags &= ~NMSOCK_CONNECTING; | |
1776 | nmp->nm_sockflags |= NMSOCK_SETUP; | |
1777 | /* move the socket to the mount structure */ | |
1778 | nmp->nm_nso = nso; | |
1779 | oldsaddr = nmp->nm_saddr; | |
1780 | nmp->nm_saddr = nso->nso_saddr; | |
1781 | lck_mtx_unlock(&nmp->nm_lock); | |
1782 | error = nfs_connect_setup(nmp); | |
1783 | lck_mtx_lock(&nmp->nm_lock); | |
1784 | nmp->nm_sockflags &= ~NMSOCK_SETUP; | |
1785 | if (!error) { | |
1786 | nmp->nm_sockflags |= NMSOCK_READY; | |
1787 | wakeup(&nmp->nm_sockflags); | |
1788 | } | |
1789 | if (error) { | |
1790 | NFS_SOCK_DBG("nfs connect %s socket %p setup failed %d\n", | |
1791 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error); | |
1792 | nfs_socket_search_update_error(&nss, error); | |
1793 | nmp->nm_saddr = oldsaddr; | |
1794 | if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) { | |
1795 | /* undo settings made prior to setup */ | |
1796 | if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_SOCKET_TYPE)) { | |
1797 | nmp->nm_sotype = 0; | |
1798 | } | |
1799 | if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_VERSION)) { | |
1800 | #if CONFIG_NFS4 | |
1801 | if (nmp->nm_vers >= NFS_VER4) { | |
1802 | if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) { | |
1803 | nmp->nm_nfsport = 0; | |
1804 | } | |
1805 | if (nmp->nm_cbid) { | |
1806 | nfs4_mount_callback_shutdown(nmp); | |
1807 | } | |
1808 | if (IS_VALID_CRED(nmp->nm_mcred)) { | |
1809 | kauth_cred_unref(&nmp->nm_mcred); | |
1810 | } | |
1811 | bzero(&nmp->nm_un, sizeof(nmp->nm_un)); | |
1812 | } | |
1813 | #endif | |
1814 | nmp->nm_vers = 0; | |
1815 | } | |
1816 | } | |
1817 | lck_mtx_unlock(&nmp->nm_lock); | |
1818 | nmp->nm_nso = NULL; | |
1819 | nfs_socket_destroy(nso); | |
1820 | goto keepsearching; | |
1821 | } | |
1822 | ||
1823 | /* update current location */ | |
1824 | if ((nmp->nm_locations.nl_current.nli_flags & NLI_VALID) && | |
1825 | (nmp->nm_locations.nl_current.nli_serv != nso->nso_location.nli_serv)) { | |
1826 | /* server has changed, we should initiate failover/recovery */ | |
1827 | // XXX | |
1828 | } | |
1829 | nmp->nm_locations.nl_current = nso->nso_location; | |
1830 | nmp->nm_locations.nl_current.nli_flags |= NLI_VALID; | |
1831 | ||
1832 | if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) { | |
1833 | /* We have now successfully connected... make a note of it. */ | |
1834 | nmp->nm_sockflags |= NMSOCK_HASCONNECTED; | |
1835 | } | |
1836 | ||
1837 | lck_mtx_unlock(&nmp->nm_lock); | |
1838 | if (oldsaddr) { | |
1839 | FREE(oldsaddr, M_SONAME); | |
1840 | } | |
1841 | ||
1842 | if (nss.nss_flags & NSS_WARNED) { | |
1843 | log(LOG_INFO, "nfs_connect: socket connect completed for %s\n", | |
1844 | vfs_statfs(nmp->nm_mountp)->f_mntfromname); | |
1845 | } | |
1846 | ||
1847 | nmp->nm_nss = NULL; | |
1848 | nfs_socket_search_cleanup(&nss); | |
1849 | if (fh) { | |
1850 | FREE(fh, M_TEMP); | |
1851 | } | |
1852 | if (path) { | |
1853 | FREE_ZONE(path, MAXPATHLEN, M_NAMEI); | |
1854 | } | |
1855 | NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname); | |
1856 | return 0; | |
1857 | } | |
1858 | ||
1859 | ||
1860 | /* setup & confirm socket connection is functional */ | |
1861 | int | |
1862 | nfs_connect_setup( | |
1863 | #if !CONFIG_NFS4 | |
1864 | __unused | |
1865 | #endif | |
1866 | struct nfsmount *nmp) | |
1867 | { | |
1868 | int error = 0; | |
1869 | #if CONFIG_NFS4 | |
1870 | if (nmp->nm_vers >= NFS_VER4) { | |
1871 | if (nmp->nm_state & NFSSTA_CLIENTID) { | |
1872 | /* first, try to renew our current state */ | |
1873 | error = nfs4_renew(nmp, R_SETUP); | |
1874 | if ((error == NFSERR_ADMIN_REVOKED) || | |
1875 | (error == NFSERR_CB_PATH_DOWN) || | |
1876 | (error == NFSERR_EXPIRED) || | |
1877 | (error == NFSERR_LEASE_MOVED) || | |
1878 | (error == NFSERR_STALE_CLIENTID)) { | |
1879 | lck_mtx_lock(&nmp->nm_lock); | |
1880 | nfs_need_recover(nmp, error); | |
1881 | lck_mtx_unlock(&nmp->nm_lock); | |
1882 | } | |
1883 | } | |
1884 | error = nfs4_setclientid(nmp); | |
1885 | } | |
1886 | #endif | |
1887 | return error; | |
1888 | } | |
1889 | ||
/*
 * NFS socket reconnect routine:
 * Called when a connection is broken.
 * - disconnect the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 *
 * Returns 0 on success; EINTR/EIO immediately for interrupted or
 * unrecoverable connects; ENXIO if the mount's dead timeout expires
 * while retrying; otherwise keeps retrying every 2 seconds.
 */
int
nfs_reconnect(struct nfsmount *nmp)
{
	struct nfsreq *rq;
	struct timeval now;
	thread_t thd = current_thread();
	int error, wentdown = 0, verbose = 1;
	time_t lastmsg;
	int timeo;

	/* Backdate lastmsg so the first "can not connect" message uses the initial delay. */
	microuptime(&now);
	lastmsg = now.tv_sec - (nmp->nm_tprintf_delay - nmp->nm_tprintf_initial_delay);

	nfs_disconnect(nmp);


	/* Use a shorter connect timeout for "squishy" (soft-ish) mounts. */
	lck_mtx_lock(&nmp->nm_lock);
	timeo = nfs_is_squishy(nmp) ? 8 : 30;
	lck_mtx_unlock(&nmp->nm_lock);

	while ((error = nfs_connect(nmp, verbose, timeo))) {
		/* only be verbose on the first attempt */
		verbose = 0;
		nfs_disconnect(nmp);
		if ((error == EINTR) || (error == ERESTART)) {
			return EINTR;
		}
		if (error == EIO) {
			return EIO;
		}
		/* Periodically log/notify that the server is unresponsive. */
		microuptime(&now);
		if ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec) {
			lastmsg = now.tv_sec;
			nfs_down(nmp, thd, error, NFSSTA_TIMEO, "can not connect", 0);
			wentdown = 1;
		}
		lck_mtx_lock(&nmp->nm_lock);
		if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
			/* we're not yet completely mounted and */
			/* we can't reconnect, so we fail */
			lck_mtx_unlock(&nmp->nm_lock);
			NFS_SOCK_DBG("Not mounted returning %d\n", error);
			return error;
		}

		/* Give up entirely if the mount's dead timeout has elapsed. */
		if (nfs_mount_check_dead_timeout(nmp)) {
			nfs_mount_make_zombie(nmp);
			lck_mtx_unlock(&nmp->nm_lock);
			return ENXIO;
		}

		if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
			lck_mtx_unlock(&nmp->nm_lock);
			return error;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		/* pause before retrying; re-check for signals after waking */
		tsleep(nfs_reconnect, PSOCK, "nfs_reconnect_delay", 2 * hz);
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			return error;
		}
	}

	if (wentdown) {
		/* we previously reported the server down; report it back up */
		nfs_up(nmp, thd, NFSSTA_TIMEO, "connected");
	}

	/*
	 * Loop through outstanding request list and mark all requests
	 * as needing a resend.  (Though nfs_need_reconnect() probably
	 * marked them all already.)
	 */
	lck_mtx_lock(nfs_request_mutex);
	TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
		if (rq->r_nmp == nmp) {
			lck_mtx_lock(&rq->r_mtx);
			if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
				rq->r_flags |= R_MUSTRESEND;
				rq->r_rtt = -1;
				wakeup(rq);
				/* async requests not currently on an iod or being sent get requeued */
				if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
					nfs_asyncio_resend(rq);
				}
			}
			lck_mtx_unlock(&rq->r_mtx);
		}
	}
	lck_mtx_unlock(nfs_request_mutex);
	return 0;
}
1986 | ||
/*
 * NFS disconnect. Clean up and unlink.
 *
 * Waits for any in-progress send or socket poke to finish, then
 * detaches the socket from the mount and destroys it.  NMSOCK_DISCONNECTING
 * is held across the (unlocked) destroy so others can tell a teardown
 * is in progress.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct nfs_socket *nso;

	lck_mtx_lock(&nmp->nm_lock);
tryagain:
	if (nmp->nm_nso) {
		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
		if (nmp->nm_state & NFSSTA_SENDING) { /* wait for sending to complete */
			nmp->nm_state |= NFSSTA_WANTSND;
			msleep(&nmp->nm_state, &nmp->nm_lock, PZERO - 1, "nfswaitsending", &ts);
			goto tryagain;
		}
		if (nmp->nm_sockflags & NMSOCK_POKE) { /* wait for poking to complete */
			msleep(&nmp->nm_sockflags, &nmp->nm_lock, PZERO - 1, "nfswaitpoke", &ts);
			goto tryagain;
		}
		nmp->nm_sockflags |= NMSOCK_DISCONNECTING;
		nmp->nm_sockflags &= ~NMSOCK_READY;
		nso = nmp->nm_nso;
		nmp->nm_nso = NULL;
		/*
		 * If the socket's address is the one the mount took over
		 * (see nfs_connect), clear it so destroying the socket
		 * doesn't free an address the mount still references.
		 */
		if (nso->nso_saddr == nmp->nm_saddr) {
			nso->nso_saddr = NULL;
		}
		/* drop the mount lock around the potentially-blocking destroy */
		lck_mtx_unlock(&nmp->nm_lock);
		nfs_socket_destroy(nso);
		lck_mtx_lock(&nmp->nm_lock);
		nmp->nm_sockflags &= ~NMSOCK_DISCONNECTING;
		lck_mtx_unlock(&nmp->nm_lock);
	} else {
		lck_mtx_unlock(&nmp->nm_lock);
	}
}
2024 | ||
2025 | /* | |
2026 | * mark an NFS mount as needing a reconnect/resends. | |
2027 | */ | |
2028 | void | |
2029 | nfs_need_reconnect(struct nfsmount *nmp) | |
2030 | { | |
2031 | struct nfsreq *rq; | |
2032 | ||
2033 | lck_mtx_lock(&nmp->nm_lock); | |
2034 | nmp->nm_sockflags &= ~(NMSOCK_READY | NMSOCK_SETUP); | |
2035 | lck_mtx_unlock(&nmp->nm_lock); | |
2036 | ||
2037 | /* | |
2038 | * Loop through outstanding request list and | |
2039 | * mark all requests as needing a resend. | |
2040 | */ | |
2041 | lck_mtx_lock(nfs_request_mutex); | |
2042 | TAILQ_FOREACH(rq, &nfs_reqq, r_chain) { | |
2043 | if (rq->r_nmp == nmp) { | |
2044 | lck_mtx_lock(&rq->r_mtx); | |
2045 | if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) { | |
2046 | rq->r_flags |= R_MUSTRESEND; | |
2047 | rq->r_rtt = -1; | |
2048 | wakeup(rq); | |
2049 | if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) { | |
2050 | nfs_asyncio_resend(rq); | |
2051 | } | |
2052 | } | |
2053 | lck_mtx_unlock(&rq->r_mtx); | |
2054 | } | |
2055 | } | |
2056 | lck_mtx_unlock(nfs_request_mutex); | |
2057 | } | |
2058 | ||
2059 | ||
/*
 * thread to handle miscellaneous async NFS socket work (reconnects/resends)
 *
 * Runs while the mount has work pending (socket not ready, resends queued,
 * monitored nodes, dead-timeout armed, recovery needed, or NFSv4 delegation
 * returns queued).  Exits on NMSOCK_UNMOUNT or when the mount goes
 * force-unmounted/dead; on the way out it may issue the v2/v3 unmount RPC.
 * Entered and exited holding nmp->nm_lock except where noted inline.
 */
void
nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
{
	struct nfsmount *nmp = arg;
	struct timespec ts = { .tv_sec = 30, .tv_nsec = 0 };
	thread_t thd = current_thread();
	struct nfsreq *req;
	struct timeval now;
	int error, dofinish;
	nfsnode_t np;
	int do_reconnect_sleep = 0;

	lck_mtx_lock(&nmp->nm_lock);
	while (!(nmp->nm_sockflags & NMSOCK_READY) ||
	    !TAILQ_EMPTY(&nmp->nm_resendq) ||
	    !LIST_EMPTY(&nmp->nm_monlist) ||
	    nmp->nm_deadto_start ||
	    (nmp->nm_state & NFSSTA_RECOVER) ||
	    ((nmp->nm_vers >= NFS_VER4) && !TAILQ_EMPTY(&nmp->nm_dreturnq))) {
		if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
			break;
		}
		/* do reconnect, if necessary */
		if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
			if (nmp->nm_reconnect_start <= 0) {
				/* record when this reconnect effort began */
				microuptime(&now);
				nmp->nm_reconnect_start = now.tv_sec;
			}
			lck_mtx_unlock(&nmp->nm_lock);
			NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
			/*
			 * XXX We don't want to call reconnect again right away if returned errors
			 * before that may not have blocked. This has caused spamming null procs
			 * from machines in the pass.
			 */
			if (do_reconnect_sleep) {
				tsleep(nfs_mount_sock_thread, PSOCK, "nfs_reconnect_sock_thread_delay", hz);
			}
			error = nfs_reconnect(nmp);
			if (error) {
				int lvl = 7;
				if (error == EIO || error == EINTR) {
					/* log loudly (level 0) only once per ~600 failures */
					lvl = (do_reconnect_sleep++ % 600) ? 7 : 0;
				}
				NFS_DBG(NFS_FAC_SOCK, lvl, "nfs reconnect %s: returned %d\n",
				    vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
			} else {
				nmp->nm_reconnect_start = 0;
				do_reconnect_sleep = 0;
			}
			lck_mtx_lock(&nmp->nm_lock);
		}
		if ((nmp->nm_sockflags & NMSOCK_READY) &&
		    (nmp->nm_state & NFSSTA_RECOVER) &&
		    !(nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
		    !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
			/* perform state recovery */
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_recover(nmp);
			lck_mtx_lock(&nmp->nm_lock);
		}
#if CONFIG_NFS4
		/* handle NFSv4 delegation returns */
		while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
		    (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) &&
		    ((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) {
			lck_mtx_unlock(&nmp->nm_lock);
			nfs4_delegation_return(np, R_RECOVER, thd, nmp->nm_mcred);
			lck_mtx_lock(&nmp->nm_lock);
		}
#endif
		/* do resends, if necessary/possible */
		while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) ||
		    (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) &&
		    ((req = TAILQ_FIRST(&nmp->nm_resendq)))) {
			if (req->r_resendtime) {
				microuptime(&now);
			}
			/* skip requests whose resend time hasn't arrived (unless force/dead) */
			while (req && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && req->r_resendtime && (now.tv_sec < req->r_resendtime)) {
				req = TAILQ_NEXT(req, r_rchain);
			}
			if (!req) {
				break;
			}
			/* take the request off the resend queue before working on it */
			TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
			req->r_rchain.tqe_next = NFSREQNOLIST;
			lck_mtx_unlock(&nmp->nm_lock);
			lck_mtx_lock(&req->r_mtx);
			/* Note that we have a reference on the request that was taken nfs_asyncio_resend */
			if (req->r_error || req->r_nmrep.nmc_mhead) {
				/* already errored or already has a reply — no resend needed */
				dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
				req->r_flags &= ~R_RESENDQ;
				wakeup(req);
				lck_mtx_unlock(&req->r_mtx);
				if (dofinish) {
					nfs_asyncio_finish(req);
				}
				nfs_request_rele(req);
				lck_mtx_lock(&nmp->nm_lock);
				continue;
			}
			if ((req->r_flags & R_RESTART) || nfs_request_using_gss(req)) {
				req->r_flags &= ~R_RESTART;
				req->r_resendtime = 0;
				lck_mtx_unlock(&req->r_mtx);
				/* async RPCs on GSS mounts need to be rebuilt and resent. */
				nfs_reqdequeue(req);
#if CONFIG_NFS_GSS
				if (nfs_request_using_gss(req)) {
					nfs_gss_clnt_rpcdone(req);
					error = nfs_gss_clnt_args_restore(req);
					if (error == ENEEDAUTH) {
						/* force a fresh xid so auth is redone */
						req->r_xid = 0;
					}
				}
#endif /* CONFIG_NFS_GSS */
				NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
				    nfs_request_using_gss(req) ? " gss" : "", req->r_procnum, req->r_xid,
				    req->r_flags, req->r_rtt);
				error = nfs_sigintr(nmp, req, req->r_thread, 0);
				if (!error) {
					error = nfs_request_add_header(req);
				}
				if (!error) {
					error = nfs_request_send(req, 0);
				}
				lck_mtx_lock(&req->r_mtx);
				if (req->r_flags & R_RESENDQ) {
					req->r_flags &= ~R_RESENDQ;
				}
				if (error) {
					req->r_error = error;
				}
				wakeup(req);
				dofinish = error && req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
				lck_mtx_unlock(&req->r_mtx);
				if (dofinish) {
					nfs_asyncio_finish(req);
				}
				nfs_request_rele(req);
				lck_mtx_lock(&nmp->nm_lock);
				error = 0;
				continue;
			}
			/* plain resend: just push the already-built request out again */
			NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
			error = nfs_sigintr(nmp, req, req->r_thread, 0);
			if (!error) {
				req->r_flags |= R_SENDING;
				lck_mtx_unlock(&req->r_mtx);
				error = nfs_send(req, 0);
				lck_mtx_lock(&req->r_mtx);
				if (!error) {
					if (req->r_flags & R_RESENDQ) {
						req->r_flags &= ~R_RESENDQ;
					}
					wakeup(req);
					lck_mtx_unlock(&req->r_mtx);
					nfs_request_rele(req);
					lck_mtx_lock(&nmp->nm_lock);
					continue;
				}
			}
			/* send failed (or we were interrupted): record the error and finish up */
			req->r_error = error;
			if (req->r_flags & R_RESENDQ) {
				req->r_flags &= ~R_RESENDQ;
			}
			wakeup(req);
			dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
			lck_mtx_unlock(&req->r_mtx);
			if (dofinish) {
				nfs_asyncio_finish(req);
			}
			nfs_request_rele(req);
			lck_mtx_lock(&nmp->nm_lock);
		}
		if (nfs_mount_check_dead_timeout(nmp)) {
			nfs_mount_make_zombie(nmp);
			break;
		}

		if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
			break;
		}
		/* check monitored nodes, if necessary/possible */
		if (!LIST_EMPTY(&nmp->nm_monlist)) {
			nmp->nm_state |= NFSSTA_MONITOR_SCAN;
			LIST_FOREACH(np, &nmp->nm_monlist, n_monlink) {
				if (!(nmp->nm_sockflags & NMSOCK_READY) ||
				    (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
					break;
				}
				np->n_mflag |= NMMONSCANINPROG;
				lck_mtx_unlock(&nmp->nm_lock);
				error = nfs_getattr(np, NULL, vfs_context_kernel(), (NGA_UNCACHED | NGA_MONITOR));
				if (!error && ISSET(np->n_flag, NUPDATESIZE)) { /* update quickly to avoid multiple events */
					nfs_data_update_size(np, 0);
				}
				lck_mtx_lock(&nmp->nm_lock);
				np->n_mflag &= ~NMMONSCANINPROG;
				if (np->n_mflag & NMMONSCANWANT) {
					/* someone is waiting for this node's scan to finish */
					np->n_mflag &= ~NMMONSCANWANT;
					wakeup(&np->n_mflag);
				}
				if (error || !(nmp->nm_sockflags & NMSOCK_READY) ||
				    (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
					break;
				}
			}
			nmp->nm_state &= ~NFSSTA_MONITOR_SCAN;
			if (nmp->nm_state & NFSSTA_UNMOUNTING) {
				wakeup(&nmp->nm_state); /* let unmounting thread know scan is done */
			}
		}
		if ((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING))) {
			/* sleep briefly; poll faster while recovery/resends/dead-timeout are pending */
			if (nmp->nm_deadto_start || !TAILQ_EMPTY(&nmp->nm_resendq) ||
			    (nmp->nm_state & NFSSTA_RECOVER)) {
				ts.tv_sec = 1;
			} else {
				ts.tv_sec = 5;
			}
			msleep(&nmp->nm_sockthd, &nmp->nm_lock, PSOCK, "nfssockthread", &ts);
		}
	}

	/* If we're unmounting, send the unmount RPC, if requested/appropriate. */
	if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
	    (nmp->nm_state & NFSSTA_MOUNTED) && NMFLAG(nmp, CALLUMNT) &&
	    (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
		lck_mtx_unlock(&nmp->nm_lock);
		nfs3_umount_rpc(nmp, vfs_context_kernel(),
		    (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2);
		lck_mtx_lock(&nmp->nm_lock);
	}

	/* detach ourselves from the mount and terminate */
	if (nmp->nm_sockthd == thd) {
		nmp->nm_sockthd = NULL;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	wakeup(&nmp->nm_sockthd);
	thread_terminate(thd);
}
2305 | ||
2306 | /* start or wake a mount's socket thread */ | |
2307 | void | |
2308 | nfs_mount_sock_thread_wake(struct nfsmount *nmp) | |
2309 | { | |
2310 | if (nmp->nm_sockthd) { | |
2311 | wakeup(&nmp->nm_sockthd); | |
2312 | } else if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS) { | |
2313 | thread_deallocate(nmp->nm_sockthd); | |
2314 | } | |
2315 | } | |
2316 | ||
2317 | /* | |
2318 | * Check if we should mark the mount dead because the | |
2319 | * unresponsive mount has reached the dead timeout. | |
2320 | * (must be called with nmp locked) | |
2321 | */ | |
2322 | int | |
2323 | nfs_mount_check_dead_timeout(struct nfsmount *nmp) | |
2324 | { | |
2325 | struct timeval now; | |
2326 | ||
2327 | if (nmp->nm_state & NFSSTA_DEAD) { | |
2328 | return 1; | |
2329 | } | |
2330 | if (nmp->nm_deadto_start == 0) { | |
2331 | return 0; | |
2332 | } | |
2333 | nfs_is_squishy(nmp); | |
2334 | if (nmp->nm_curdeadtimeout <= 0) { | |
2335 | return 0; | |
2336 | } | |
2337 | microuptime(&now); | |
2338 | if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout) { | |
2339 | return 0; | |
2340 | } | |
2341 | return 1; | |
2342 | } | |
2343 | ||
2344 | /* | |
2345 | * Call nfs_mount_zombie to remove most of the | |
2346 | * nfs state for the mount, and then ask to be forcibly unmounted. | |
2347 | * | |
2348 | * Assumes the nfs mount structure lock nm_lock is held. | |
2349 | */ | |
2350 | ||
2351 | void | |
2352 | nfs_mount_make_zombie(struct nfsmount *nmp) | |
2353 | { | |
2354 | fsid_t fsid; | |
2355 | ||
2356 | if (!nmp) { | |
2357 | return; | |
2358 | } | |
2359 | ||
2360 | if (nmp->nm_state & NFSSTA_DEAD) { | |
2361 | return; | |
2362 | } | |
2363 | ||
2364 | printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, | |
2365 | (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : ""); | |
2366 | fsid = vfs_statfs(nmp->nm_mountp)->f_fsid; | |
2367 | lck_mtx_unlock(&nmp->nm_lock); | |
2368 | nfs_mount_zombie(nmp, NFSSTA_DEAD); | |
2369 | vfs_event_signal(&fsid, VQ_DEAD, 0); | |
2370 | lck_mtx_lock(&nmp->nm_lock); | |
2371 | } | |
2372 | ||
2373 | ||
/*
 * NFS callback channel socket state
 *
 * One of these tracks each server->client callback connection.
 */
struct nfs_callback_socket {
	TAILQ_ENTRY(nfs_callback_socket) ncbs_link;     /* list linkage */
	socket_t                        ncbs_so;        /* the socket */
	struct sockaddr_storage         ncbs_saddr;     /* socket address */
	struct nfs_rpc_record_state     ncbs_rrs;       /* RPC record parsing state */
	time_t                          ncbs_stamp;     /* last accessed at */
	uint32_t                        ncbs_flags;     /* see below */
};
/* ncbs_flags — NOTE(review): meanings inferred from names; confirm against the upcall/handler code */
#define NCBSOCK_UPCALL          0x0001  /* upcall processing in progress on this socket */
#define NCBSOCK_UPCALLWANT      0x0002  /* someone is waiting for the upcall to finish */
#define NCBSOCK_DEAD            0x0004  /* socket is defunct and should be torn down */
2388 | ||
2389 | #if CONFIG_NFS4 | |
2390 | /* | |
2391 | * NFS callback channel state | |
2392 | * | |
2393 | * One listening socket for accepting socket connections from servers and | |
2394 | * a list of connected sockets to handle callback requests on. | |
2395 | * Mounts registered with the callback channel are assigned IDs and | |
2396 | * put on a list so that the callback request handling code can match | |
2397 | * the requests up with mounts. | |
2398 | */ | |
/* (the following callback channel state is accessed under nfs_global_mutex) */
socket_t nfs4_cb_so = NULL;        /* IPv4 listening socket */
socket_t nfs4_cb_so6 = NULL;       /* IPv6 listening socket */
in_port_t nfs4_cb_port = 0;        /* port the IPv4 listening socket is bound to */
in_port_t nfs4_cb_port6 = 0;       /* port the IPv6 listening socket is bound to */
uint32_t nfs4_cb_id = 0;           /* next callback ID to hand out (0 is reserved) */
uint32_t nfs4_cb_so_usecount = 0;  /* number of mounts using the callback channel */
TAILQ_HEAD(nfs4_cb_sock_list, nfs_callback_socket) nfs4_cb_socks;  /* accepted callback sockets */
TAILQ_HEAD(nfs4_cb_mount_list, nfsmount) nfs4_cb_mounts;           /* mounts registered for callbacks */

int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
2409 | ||
2410 | /* | |
2411 | * Set up the callback channel for the NFS mount. | |
2412 | * | |
2413 | * Initializes the callback channel socket state and | |
2414 | * assigns a callback ID to the mount. | |
2415 | */ | |
void
nfs4_mount_callback_setup(struct nfsmount *nmp)
{
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	socket_t so = NULL;
	socket_t so6 = NULL;
	struct timeval timeo;
	int error, on = 1;
	in_port_t port;

	lck_mtx_lock(nfs_global_mutex);
	/* first caller initializes the global mount/socket lists */
	if (nfs4_cb_id == 0) {
		TAILQ_INIT(&nfs4_cb_mounts);
		TAILQ_INIT(&nfs4_cb_socks);
		nfs4_cb_id++;
	}
	/* assign the mount a callback ID, skipping 0 (reserved) on wraparound */
	nmp->nm_cbid = nfs4_cb_id++;
	if (nmp->nm_cbid == 0) {
		nmp->nm_cbid = nfs4_cb_id++;
	}
	nfs4_cb_so_usecount++;
	TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);

	/* listening socket(s) already set up by an earlier mount — done */
	if (nfs4_cb_so) {
		lck_mtx_unlock(nfs_global_mutex);
		return;
	}

	/* IPv4 */
	error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d creating listening IPv4 socket\n", error);
		goto fail;
	}
	so = nfs4_cb_so;

	sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = htons(nfs_callback_port); /* try to use specified port */
	error = sock_bind(so, (struct sockaddr *)&sin);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
		goto fail;
	}
	/* find out which port we actually got (nfs_callback_port may have been 0) */
	error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d getting listening IPv4 socket port\n", error);
		goto fail;
	}
	nfs4_cb_port = ntohs(sin.sin_port);

	error = sock_listen(so, 32);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d on IPv4 listen\n", error);
		goto fail;
	}

	/* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
	timeo.tv_usec = 0;
	timeo.tv_sec = 60;
	/* timeout-setting failures are logged but not fatal */
	error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error);
	}
	error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error);
	}
	sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
	error = 0;

	/* IPv6 */
	error = sock_socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so6);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d creating listening IPv6 socket\n", error);
		goto fail;
	}
	so6 = nfs4_cb_so6;

	sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
	/* try to use specified port or same port as IPv4 */
	port = nfs_callback_port ? nfs_callback_port : nfs4_cb_port;
ipv6_bind_again:
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_addr = in6addr_any;
	sin6.sin6_port = htons(port);
	error = sock_bind(so6, (struct sockaddr *)&sin6);
	if (error) {
		if (port != nfs_callback_port) {
			/* if we simply tried to match the IPv4 port, then try any port */
			port = 0;
			goto ipv6_bind_again;
		}
		log(LOG_INFO, "nfs callback setup: error %d binding listening IPv6 socket\n", error);
		goto fail;
	}
	error = sock_getsockname(so6, (struct sockaddr *)&sin6, sin6.sin6_len);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d getting listening IPv6 socket port\n", error);
		goto fail;
	}
	nfs4_cb_port6 = ntohs(sin6.sin6_port);

	error = sock_listen(so6, 32);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d on IPv6 listen\n", error);
		goto fail;
	}

	/* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
	timeo.tv_usec = 0;
	timeo.tv_sec = 60;
	error = sock_setsockopt(so6, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error);
	}
	error = sock_setsockopt(so6, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error);
	}
	sock_setsockopt(so6, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(so6, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(so6, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
	error = 0;

fail:
	/*
	 * On any fatal error both listening sockets are torn down, so an IPv6
	 * setup failure also discards the working IPv4 socket.  The mount keeps
	 * its cbid and usecount; those are released in nfs4_mount_callback_shutdown().
	 * Clear the globals before unlocking so no one else sees dying sockets.
	 */
	if (error) {
		nfs4_cb_so = nfs4_cb_so6 = NULL;
		lck_mtx_unlock(nfs_global_mutex);
		if (so) {
			sock_shutdown(so, SHUT_RDWR);
			sock_close(so);
		}
		if (so6) {
			sock_shutdown(so6, SHUT_RDWR);
			sock_close(so6);
		}
	} else {
		lck_mtx_unlock(nfs_global_mutex);
	}
}
2564 | ||
2565 | /* | |
2566 | * Shut down the callback channel for the NFS mount. | |
2567 | * | |
2568 | * Clears the mount's callback ID and releases the mounts | |
2569 | * reference on the callback socket. Last reference dropped | |
2570 | * will also shut down the callback socket(s). | |
2571 | */ | |
void
nfs4_mount_callback_shutdown(struct nfsmount *nmp)
{
	struct nfs_callback_socket *ncbsp;
	socket_t so, so6;
	struct nfs4_cb_sock_list cb_socks;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	lck_mtx_lock(nfs_global_mutex);
	TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
	/* wait for any callbacks in progress to complete */
	while (nmp->nm_cbrefs) {
		msleep(&nmp->nm_cbrefs, nfs_global_mutex, PSOCK, "cbshutwait", &ts);
	}
	/* clearing nm_cbid is what lets nfs4_cb_handler wake us above */
	nmp->nm_cbid = 0;
	/* other mounts still using the callback channel — leave sockets alone */
	if (--nfs4_cb_so_usecount) {
		lck_mtx_unlock(nfs_global_mutex);
		return;
	}
	/*
	 * Last user: detach the listening sockets and steal the whole
	 * connected-socket list under the lock, then tear everything down
	 * after dropping it (socket calls can block).
	 */
	so = nfs4_cb_so;
	so6 = nfs4_cb_so6;
	nfs4_cb_so = nfs4_cb_so6 = NULL;
	TAILQ_INIT(&cb_socks);
	TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
	lck_mtx_unlock(nfs_global_mutex);
	if (so) {
		sock_shutdown(so, SHUT_RDWR);
		sock_close(so);
	}
	if (so6) {
		sock_shutdown(so6, SHUT_RDWR);
		sock_close(so6);
	}
	/* close and free every accepted callback socket */
	while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
		TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
		sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
		sock_close(ncbsp->ncbs_so);
		nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
		FREE(ncbsp, M_TEMP);
	}
}
2613 | ||
2614 | /* | |
2615 | * Check periodically for stale/unused nfs callback sockets | |
2616 | */ | |
#define NFS4_CB_TIMER_PERIOD    30      /* seconds between reap scans */
#define NFS4_CB_IDLE_MAX        300     /* max socket idle time before it is reaped */
void
nfs4_callback_timer(__unused void *param0, __unused void *param1)
{
	struct nfs_callback_socket *ncbsp, *nextncbsp;
	struct timeval now;

loop:
	lck_mtx_lock(nfs_global_mutex);
	/* no sockets left: stop the timer until a new one is accepted */
	if (TAILQ_EMPTY(&nfs4_cb_socks)) {
		nfs4_callback_timer_on = 0;
		lck_mtx_unlock(nfs_global_mutex);
		return;
	}
	microuptime(&now);
	TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
		/* keep live sockets that have been used recently */
		if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
		    (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX))) {
			continue;
		}
		/*
		 * Reap this one.  The close can block, so drop the lock first
		 * and restart the scan from the top afterwards — the list may
		 * have changed while we were unlocked.
		 */
		TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
		lck_mtx_unlock(nfs_global_mutex);
		sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
		sock_close(ncbsp->ncbs_so);
		nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
		FREE(ncbsp, M_TEMP);
		goto loop;
	}
	/* rearm the timer for the next periodic scan */
	nfs4_callback_timer_on = 1;
	nfs_interval_timer_start(nfs4_callback_timer_call,
	    NFS4_CB_TIMER_PERIOD * 1000);
	lck_mtx_unlock(nfs_global_mutex);
}
2651 | ||
2652 | /* | |
2653 | * Accept a new callback socket. | |
2654 | */ | |
void
nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
{
	socket_t newso = NULL;
	struct nfs_callback_socket *ncbsp;
	struct nfsmount *nmp;
	struct timeval timeo, now;
	int error, on = 1, ip;

	/* identify which listening socket fired; ignore anything else */
	if (so == nfs4_cb_so) {
		ip = 4;
	} else if (so == nfs4_cb_so6) {
		ip = 6;
	} else {
		return;
	}

	/* allocate/initialize a new nfs_callback_socket */
	MALLOC(ncbsp, struct nfs_callback_socket *, sizeof(struct nfs_callback_socket), M_TEMP, M_WAITOK);
	if (!ncbsp) {
		log(LOG_ERR, "nfs callback accept: no memory for new socket\n");
		return;
	}
	bzero(ncbsp, sizeof(*ncbsp));
	/* preset the sockaddr length for the family sock_accept() will fill in */
	ncbsp->ncbs_saddr.ss_len = (ip == 4) ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
	nfs_rpc_record_state_init(&ncbsp->ncbs_rrs);

	/* accept a new socket (nfs4_cb_rcv becomes its upcall, with ncbsp as its arg) */
	error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_saddr,
	    ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT,
	    nfs4_cb_rcv, ncbsp, &newso);
	if (error) {
		log(LOG_INFO, "nfs callback accept: error %d accepting IPv%d socket\n", error, ip);
		FREE(ncbsp, M_TEMP);
		return;
	}

	/* set up the new socket */
	/* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
	timeo.tv_usec = 0;
	timeo.tv_sec = 60;
	/* timeout-setting failures are logged but not fatal */
	error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error, ip);
	}
	error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error, ip);
	}
	sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));

	ncbsp->ncbs_so = newso;
	microuptime(&now);
	ncbsp->ncbs_stamp = now.tv_sec;

	lck_mtx_lock(nfs_global_mutex);

	/* add it to the list */
	TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);

	/* verify it's from a host we have mounted */
	TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
		/* check if socket's source address matches this mount's server address */
		if (!nmp->nm_saddr) {
			continue;
		}
		if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
			break;
		}
	}
	if (!nmp) { /* we don't want this socket, mark it dead */
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;
	}

	/* make sure the callback socket cleanup timer is running */
	/* (shorten the timer if we've got a socket we don't want) */
	if (!nfs4_callback_timer_on) {
		nfs4_callback_timer_on = 1;
		nfs_interval_timer_start(nfs4_callback_timer_call,
		    !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
	} else if (!nmp && (nfs4_callback_timer_on < 2)) {
		/* timer_on == 2 means a short (500ms) reap is already scheduled */
		nfs4_callback_timer_on = 2;
		thread_call_cancel(nfs4_callback_timer_call);
		nfs_interval_timer_start(nfs4_callback_timer_call, 500);
	}

	lck_mtx_unlock(nfs_global_mutex);
}
2746 | ||
2747 | /* | |
2748 | * Receive mbufs from callback sockets into RPC records and process each record. | |
2749 | * Detect connection has been closed and shut down. | |
2750 | */ | |
void
nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfs_callback_socket *ncbsp = arg;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct timeval now;
	mbuf_t m;
	int error = 0, recv = 1;

	/* serialize upcalls on this socket via the NCBSOCK_UPCALL flag */
	lck_mtx_lock(nfs_global_mutex);
	while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
		/* wait if upcall is already in progress */
		ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
		msleep(ncbsp, nfs_global_mutex, PSOCK, "cbupcall", &ts);
	}
	ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
	lck_mtx_unlock(nfs_global_mutex);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, MSG_DONTWAIT, &recv, &m);
		if (m) { /* handle the request */
			error = nfs4_cb_handler(ncbsp, m);
		}
	}

	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/*
		 * Socket is either being closed or should be.
		 * We can't close the socket in the context of the upcall.
		 * So we mark it as dead and leave it for the cleanup timer to reap.
		 */
		ncbsp->ncbs_stamp = 0;
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;
	} else {
		/* still healthy — refresh the idle stamp used by the reap timer */
		microuptime(&now);
		ncbsp->ncbs_stamp = now.tv_sec;
	}

	lck_mtx_lock(nfs_global_mutex);
	ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
	lck_mtx_unlock(nfs_global_mutex);
	/* wake any upcall that was waiting on NCBSOCK_UPCALL */
	wakeup(ncbsp);
}
2796 | ||
2797 | /* | |
2798 | * Handle an NFS callback channel request. | |
2799 | */ | |
2800 | int | |
2801 | nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) | |
2802 | { | |
2803 | socket_t so = ncbsp->ncbs_so; | |
2804 | struct nfsm_chain nmreq, nmrep; | |
2805 | mbuf_t mhead = NULL, mrest = NULL, m; | |
2806 | struct msghdr msg; | |
2807 | struct nfsmount *nmp; | |
2808 | fhandle_t fh; | |
2809 | nfsnode_t np; | |
2810 | nfs_stateid stateid; | |
2811 | uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes; | |
2812 | uint32_t val, xid, procnum, taglen, cbid, numops, op, status; | |
2813 | uint32_t auth_type, auth_len; | |
2814 | uint32_t numres, *pnumres; | |
2815 | int error = 0, replen, len; | |
2816 | size_t sentlen = 0; | |
2817 | ||
2818 | xid = numops = op = status = procnum = taglen = cbid = 0; | |
2819 | ||
2820 | nfsm_chain_dissect_init(error, &nmreq, mreq); | |
2821 | nfsm_chain_get_32(error, &nmreq, xid); // RPC XID | |
2822 | nfsm_chain_get_32(error, &nmreq, val); // RPC Call | |
2823 | nfsm_assert(error, (val == RPC_CALL), EBADRPC); | |
2824 | nfsm_chain_get_32(error, &nmreq, val); // RPC Version | |
2825 | nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH); | |
2826 | nfsm_chain_get_32(error, &nmreq, val); // RPC Program Number | |
2827 | nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL); | |
2828 | nfsm_chain_get_32(error, &nmreq, val); // NFS Callback Program Version Number | |
2829 | nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH); | |
2830 | nfsm_chain_get_32(error, &nmreq, procnum); // NFS Callback Procedure Number | |
2831 | nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL); | |
2832 | ||
2833 | /* Handle authentication */ | |
2834 | /* XXX just ignore auth for now - handling kerberos may be tricky */ | |
2835 | nfsm_chain_get_32(error, &nmreq, auth_type); // RPC Auth Flavor | |
2836 | nfsm_chain_get_32(error, &nmreq, auth_len); // RPC Auth Length | |
2837 | nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC); | |
2838 | if (!error && (auth_len > 0)) { | |
2839 | nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len)); | |
2840 | } | |
2841 | nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE) | |
2842 | nfsm_chain_get_32(error, &nmreq, auth_len); // verifier length | |
2843 | nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC); | |
2844 | if (!error && (auth_len > 0)) { | |
2845 | nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len)); | |
2846 | } | |
2847 | if (error) { | |
2848 | status = error; | |
2849 | error = 0; | |
2850 | goto nfsmout; | |
2851 | } | |
2852 | ||
2853 | switch (procnum) { | |
2854 | case NFSPROC4_CB_NULL: | |
2855 | status = NFSERR_RETVOID; | |
2856 | break; | |
2857 | case NFSPROC4_CB_COMPOUND: | |
2858 | /* tag, minorversion, cb ident, numops, op array */ | |
2859 | nfsm_chain_get_32(error, &nmreq, taglen); /* tag length */ | |
2860 | nfsm_assert(error, (val <= NFS4_OPAQUE_LIMIT), EBADRPC); | |
2861 | ||
2862 | /* start building the body of the response */ | |
2863 | nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5 * NFSX_UNSIGNED); | |
2864 | nfsm_chain_init(&nmrep, mrest); | |
2865 | ||
2866 | /* copy tag from request to response */ | |
2867 | nfsm_chain_add_32(error, &nmrep, taglen); /* tag length */ | |
2868 | for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) { | |
2869 | nfsm_chain_get_32(error, &nmreq, val); | |
2870 | nfsm_chain_add_32(error, &nmrep, val); | |
2871 | } | |
2872 | ||
2873 | /* insert number of results placeholder */ | |
2874 | numres = 0; | |
2875 | nfsm_chain_add_32(error, &nmrep, numres); | |
2876 | pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED); | |
2877 | ||
2878 | nfsm_chain_get_32(error, &nmreq, val); /* minorversion */ | |
2879 | nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH); | |
2880 | nfsm_chain_get_32(error, &nmreq, cbid); /* callback ID */ | |
2881 | nfsm_chain_get_32(error, &nmreq, numops); /* number of operations */ | |
2882 | if (error) { | |
2883 | if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH)) { | |
2884 | status = error; | |
2885 | } else if ((error == ENOBUFS) || (error == ENOMEM)) { | |
2886 | status = NFSERR_RESOURCE; | |
2887 | } else { | |
2888 | status = NFSERR_SERVERFAULT; | |
2889 | } | |
2890 | error = 0; | |
2891 | nfsm_chain_null(&nmrep); | |
2892 | goto nfsmout; | |
2893 | } | |
2894 | /* match the callback ID to a registered mount */ | |
2895 | lck_mtx_lock(nfs_global_mutex); | |
2896 | TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) { | |
2897 | if (nmp->nm_cbid != cbid) { | |
2898 | continue; | |
2899 | } | |
2900 | /* verify socket's source address matches this mount's server address */ | |
2901 | if (!nmp->nm_saddr) { | |
2902 | continue; | |
2903 | } | |
2904 | if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) { | |
2905 | break; | |
2906 | } | |
2907 | } | |
2908 | /* mark the NFS mount as busy */ | |
2909 | if (nmp) { | |
2910 | nmp->nm_cbrefs++; | |
2911 | } | |
2912 | lck_mtx_unlock(nfs_global_mutex); | |
2913 | if (!nmp) { | |
2914 | /* if no mount match, just drop socket. */ | |
2915 | error = EPERM; | |
2916 | nfsm_chain_null(&nmrep); | |
2917 | goto out; | |
2918 | } | |
2919 | ||
2920 | /* process ops, adding results to mrest */ | |
2921 | while (numops > 0) { | |
2922 | numops--; | |
2923 | nfsm_chain_get_32(error, &nmreq, op); | |
2924 | if (error) { | |
2925 | break; | |
2926 | } | |
2927 | switch (op) { | |
2928 | case NFS_OP_CB_GETATTR: | |
2929 | // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS) | |
2930 | np = NULL; | |
2931 | nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh); | |
2932 | bmlen = NFS_ATTR_BITMAP_LEN; | |
2933 | nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen); | |
2934 | if (error) { | |
2935 | status = error; | |
2936 | error = 0; | |
2937 | numops = 0; /* don't process any more ops */ | |
2938 | } else { | |
2939 | /* find the node for the file handle */ | |
2940 | error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np); | |
2941 | if (error || !np) { | |
2942 | status = NFSERR_BADHANDLE; | |
2943 | error = 0; | |
2944 | np = NULL; | |
2945 | numops = 0; /* don't process any more ops */ | |
2946 | } | |
2947 | } | |
2948 | nfsm_chain_add_32(error, &nmrep, op); | |
2949 | nfsm_chain_add_32(error, &nmrep, status); | |
2950 | if (!error && (status == EBADRPC)) { | |
2951 | error = status; | |
2952 | } | |
2953 | if (np) { | |
2954 | /* only allow returning size, change, and mtime attrs */ | |
2955 | NFS_CLEAR_ATTRIBUTES(&rbitmap); | |
2956 | attrbytes = 0; | |
2957 | if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) { | |
2958 | NFS_BITMAP_SET(&rbitmap, NFS_FATTR_CHANGE); | |
2959 | attrbytes += 2 * NFSX_UNSIGNED; | |
2960 | } | |
2961 | if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) { | |
2962 | NFS_BITMAP_SET(&rbitmap, NFS_FATTR_SIZE); | |
2963 | attrbytes += 2 * NFSX_UNSIGNED; | |
2964 | } | |
2965 | if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) { | |
2966 | NFS_BITMAP_SET(&rbitmap, NFS_FATTR_TIME_MODIFY); | |
2967 | attrbytes += 3 * NFSX_UNSIGNED; | |
2968 | } | |
2969 | nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN); | |
2970 | nfsm_chain_add_32(error, &nmrep, attrbytes); | |
2971 | if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) { | |
2972 | nfsm_chain_add_64(error, &nmrep, | |
2973 | np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0)); | |
2974 | } | |
2975 | if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) { | |
2976 | nfsm_chain_add_64(error, &nmrep, np->n_size); | |
2977 | } | |
2978 | if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) { | |
2979 | nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]); | |
2980 | nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]); | |
2981 | } | |
2982 | nfs_node_unlock(np); | |
2983 | vnode_put(NFSTOV(np)); | |
2984 | np = NULL; | |
2985 | } | |
2986 | /* | |
2987 | * If we hit an error building the reply, we can't easily back up. | |
2988 | * So we'll just update the status and hope the server ignores the | |
2989 | * extra garbage. | |
2990 | */ | |
2991 | break; | |
2992 | case NFS_OP_CB_RECALL: | |
2993 | // (STATEID, TRUNCATE, FH) -> (STATUS) | |
2994 | np = NULL; | |
2995 | nfsm_chain_get_stateid(error, &nmreq, &stateid); | |
2996 | nfsm_chain_get_32(error, &nmreq, truncate); | |
2997 | nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh); | |
2998 | if (error) { | |
2999 | status = error; | |
3000 | error = 0; | |
3001 | numops = 0; /* don't process any more ops */ | |
3002 | } else { | |
3003 | /* find the node for the file handle */ | |
3004 | error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np); | |
3005 | if (error || !np) { | |
3006 | status = NFSERR_BADHANDLE; | |
3007 | error = 0; | |
3008 | np = NULL; | |
3009 | numops = 0; /* don't process any more ops */ | |
3010 | } else if (!(np->n_openflags & N_DELEG_MASK) || | |
3011 | bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) { | |
3012 | /* delegation stateid state doesn't match */ | |
3013 | status = NFSERR_BAD_STATEID; | |
3014 | numops = 0; /* don't process any more ops */ | |
3015 | } | |
3016 | if (!status) { /* add node to recall queue, and wake socket thread */ | |
3017 | nfs4_delegation_return_enqueue(np); | |
3018 | } | |
3019 | if (np) { | |
3020 | nfs_node_unlock(np); | |
3021 | vnode_put(NFSTOV(np)); | |
3022 | } | |
3023 | } | |
3024 | nfsm_chain_add_32(error, &nmrep, op); | |
3025 | nfsm_chain_add_32(error, &nmrep, status); | |
3026 | if (!error && (status == EBADRPC)) { | |
3027 | error = status; | |
3028 | } | |
3029 | break; | |
3030 | case NFS_OP_CB_ILLEGAL: | |
3031 | default: | |
3032 | nfsm_chain_add_32(error, &nmrep, NFS_OP_CB_ILLEGAL); | |
3033 | status = NFSERR_OP_ILLEGAL; | |
3034 | nfsm_chain_add_32(error, &nmrep, status); | |
3035 | numops = 0; /* don't process any more ops */ | |
3036 | break; | |
3037 | } | |
3038 | numres++; | |
3039 | } | |
3040 | ||
3041 | if (!status && error) { | |
3042 | if (error == EBADRPC) { | |
3043 | status = error; | |
3044 | } else if ((error == ENOBUFS) || (error == ENOMEM)) { | |
3045 | status = NFSERR_RESOURCE; | |
3046 | } else { | |
3047 | status = NFSERR_SERVERFAULT; | |
3048 | } | |
3049 | error = 0; | |
3050 | } | |
3051 | ||
3052 | /* Now, set the numres field */ | |
3053 | *pnumres = txdr_unsigned(numres); | |
3054 | nfsm_chain_build_done(error, &nmrep); | |
3055 | nfsm_chain_null(&nmrep); | |
3056 | ||
3057 | /* drop the callback reference on the mount */ | |
3058 | lck_mtx_lock(nfs_global_mutex); | |
3059 | nmp->nm_cbrefs--; | |
3060 | if (!nmp->nm_cbid) { | |
3061 | wakeup(&nmp->nm_cbrefs); | |
3062 | } | |
3063 | lck_mtx_unlock(nfs_global_mutex); | |
3064 | break; | |
3065 | } | |
3066 | ||
3067 | nfsmout: | |
3068 | if (status == EBADRPC) { | |
3069 | OSAddAtomic64(1, &nfsstats.rpcinvalid); | |
3070 | } | |
3071 | ||
3072 | /* build reply header */ | |
3073 | error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mhead); | |
3074 | nfsm_chain_init(&nmrep, mhead); | |
3075 | nfsm_chain_add_32(error, &nmrep, 0); /* insert space for an RPC record mark */ | |
3076 | nfsm_chain_add_32(error, &nmrep, xid); | |
3077 | nfsm_chain_add_32(error, &nmrep, RPC_REPLY); | |
3078 | if ((status == ERPCMISMATCH) || (status & NFSERR_AUTHERR)) { | |
3079 | nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED); | |
3080 | if (status & NFSERR_AUTHERR) { | |
3081 | nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR); | |
3082 | nfsm_chain_add_32(error, &nmrep, (status & ~NFSERR_AUTHERR)); | |
3083 | } else { | |
3084 | nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH); | |
3085 | nfsm_chain_add_32(error, &nmrep, RPC_VER2); | |
3086 | nfsm_chain_add_32(error, &nmrep, RPC_VER2); | |
3087 | } | |
3088 | } else { | |
3089 | /* reply status */ | |
3090 | nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED); | |
3091 | /* XXX RPCAUTH_NULL verifier */ | |
3092 | nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL); | |
3093 | nfsm_chain_add_32(error, &nmrep, 0); | |
3094 | /* accepted status */ | |
3095 | switch (status) { | |
3096 | case EPROGUNAVAIL: | |
3097 | nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL); | |
3098 | break; | |
3099 | case EPROGMISMATCH: | |
3100 | nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH); | |
3101 | nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION); | |
3102 | nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION); | |
3103 | break; | |
3104 | case EPROCUNAVAIL: | |
3105 | nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL); | |
3106 | break; | |
3107 | case EBADRPC: | |
3108 | nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE); | |
3109 | break; | |
3110 | default: | |
3111 | nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS); | |
3112 | if (status != NFSERR_RETVOID) { | |
3113 | nfsm_chain_add_32(error, &nmrep, status); | |
3114 | } | |
3115 | break; | |
3116 | } | |
3117 | } | |
3118 | nfsm_chain_build_done(error, &nmrep); | |
3119 | if (error) { | |
3120 | nfsm_chain_null(&nmrep); | |
3121 | goto out; | |
3122 | } | |
3123 | error = mbuf_setnext(nmrep.nmc_mcur, mrest); | |
3124 | if (error) { | |
3125 | printf("nfs cb: mbuf_setnext failed %d\n", error); | |
3126 | goto out; | |
3127 | } | |
3128 | mrest = NULL; | |
3129 | /* Calculate the size of the reply */ | |
3130 | replen = 0; | |
3131 | for (m = nmrep.nmc_mhead; m; m = mbuf_next(m)) { | |
3132 | replen += mbuf_len(m); | |
3133 | } | |
3134 | mbuf_pkthdr_setlen(mhead, replen); | |
3135 | error = mbuf_pkthdr_setrcvif(mhead, NULL); | |
3136 | nfsm_chain_set_recmark(error, &nmrep, (replen - NFSX_UNSIGNED) | 0x80000000); | |
3137 | nfsm_chain_null(&nmrep); | |
3138 | ||
3139 | /* send the reply */ | |
3140 | bzero(&msg, sizeof(msg)); | |
3141 | error = sock_sendmbuf(so, &msg, mhead, 0, &sentlen); | |
3142 | mhead = NULL; | |
3143 | if (!error && ((int)sentlen != replen)) { | |
3144 | error = EWOULDBLOCK; | |
3145 | } | |
3146 | if (error == EWOULDBLOCK) { /* inability to send response is considered fatal */ | |
3147 | error = ETIMEDOUT; | |
3148 | } | |
3149 | out: | |
3150 | if (error) { | |
3151 | nfsm_chain_cleanup(&nmrep); | |
3152 | } | |
3153 | if (mhead) { | |
3154 | mbuf_freem(mhead); | |
3155 | } | |
3156 | if (mrest) { | |
3157 | mbuf_freem(mrest); | |
3158 | } | |
3159 | if (mreq) { | |
3160 | mbuf_freem(mreq); | |
3161 | } | |
3162 | return error; | |
3163 | } | |
3164 | #endif /* CONFIG_NFS4 */ | |
3165 | ||
3166 | /* | |
3167 | * Initialize an nfs_rpc_record_state structure. | |
3168 | */ | |
3169 | void | |
3170 | nfs_rpc_record_state_init(struct nfs_rpc_record_state *nrrsp) | |
3171 | { | |
3172 | bzero(nrrsp, sizeof(*nrrsp)); | |
3173 | nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft); | |
3174 | } | |
3175 | ||
3176 | /* | |
3177 | * Clean up an nfs_rpc_record_state structure. | |
3178 | */ | |
3179 | void | |
3180 | nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state *nrrsp) | |
3181 | { | |
3182 | if (nrrsp->nrrs_m) { | |
3183 | mbuf_freem(nrrsp->nrrs_m); | |
3184 | nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL; | |
3185 | } | |
3186 | } | |
3187 | ||
3188 | /* | |
3189 | * Read the next (marked) RPC record from the socket. | |
3190 | * | |
3191 | * *recvp returns if any data was received. | |
3192 | * *mp returns the next complete RPC record | |
3193 | */ | |
int
nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, int *recvp, mbuf_t *mp)
{
	struct iovec aio;
	struct msghdr msg;
	size_t rcvlen;
	int error = 0;
	mbuf_t m;

	*recvp = 0;
	*mp = NULL;

	/* read the TCP RPC record marker */
	while (!error && nrrsp->nrrs_markerleft) {
		/* read into the tail of nrrs_fragleft; nrrs_markerleft counts
		 * how many marker bytes are still outstanding */
		aio.iov_base = ((char*)&nrrsp->nrrs_fragleft +
		    sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft);
		aio.iov_len = nrrsp->nrrs_markerleft;
		bzero(&msg, sizeof(msg));
		msg.msg_iov = &aio;
		msg.msg_iovlen = 1;
		error = sock_receive(so, &msg, flags, &rcvlen);
		if (error || !rcvlen) {
			break;
		}
		*recvp = 1;
		nrrsp->nrrs_markerleft -= rcvlen;
		if (nrrsp->nrrs_markerleft) {
			continue;
		}
		/* record marker complete: high bit flags the last fragment,
		 * remaining 31 bits are the fragment length (RFC 1831 §10) */
		nrrsp->nrrs_fragleft = ntohl(nrrsp->nrrs_fragleft);
		if (nrrsp->nrrs_fragleft & 0x80000000) {
			nrrsp->nrrs_lastfrag = 1;
			nrrsp->nrrs_fragleft &= ~0x80000000;
		}
		nrrsp->nrrs_reclen += nrrsp->nrrs_fragleft;
		if (nrrsp->nrrs_reclen > NFS_MAXPACKET) {
			/* This is SERIOUS! We are out of sync with the sender. */
			/* NOTE(review): nrrs_reclen looks unsigned; %d assumes it fits in int — confirm */
			log(LOG_ERR, "impossible RPC record length (%d) on callback", nrrsp->nrrs_reclen);
			error = EFBIG;
		}
	}

	/* read the TCP RPC record fragment */
	while (!error && !nrrsp->nrrs_markerleft && nrrsp->nrrs_fragleft) {
		m = NULL;
		rcvlen = nrrsp->nrrs_fragleft;
		error = sock_receivembuf(so, NULL, &m, flags, &rcvlen);
		if (error || !rcvlen || !m) {
			break;
		}
		*recvp = 1;
		/* append mbufs to list */
		nrrsp->nrrs_fragleft -= rcvlen;
		if (!nrrsp->nrrs_m) {
			nrrsp->nrrs_m = m;
		} else {
			error = mbuf_setnext(nrrsp->nrrs_mlast, m);
			if (error) {
				printf("nfs tcp rcv: mbuf_setnext failed %d\n", error);
				mbuf_freem(m);
				break;
			}
		}
		/* advance to the tail of the newly-appended chain */
		while (mbuf_next(m)) {
			m = mbuf_next(m);
		}
		nrrsp->nrrs_mlast = m;
	}

	/* done reading fragment? */
	if (!error && !nrrsp->nrrs_markerleft && !nrrsp->nrrs_fragleft) {
		/* reset socket fragment parsing state */
		nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
		if (nrrsp->nrrs_lastfrag) {
			/* RPC record complete: hand the assembled chain to the caller */
			*mp = nrrsp->nrrs_m;
			/* reset socket record parsing state */
			nrrsp->nrrs_reclen = 0;
			nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
			nrrsp->nrrs_lastfrag = 0;
		}
	}

	return error;
}
3280 | ||
3281 | ||
3282 | ||
3283 | /* | |
3284 | * The NFS client send routine. | |
3285 | * | |
3286 | * Send the given NFS request out the mount's socket. | |
3287 | * Holds nfs_sndlock() for the duration of this call. | |
3288 | * | |
3289 | * - check for request termination (sigintr) | |
3290 | * - wait for reconnect, if necessary | |
3291 | * - UDP: check the congestion window | |
3292 | * - make a copy of the request to send | |
3293 | * - UDP: update the congestion window | |
3294 | * - send the request | |
3295 | * | |
3296 | * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared. | |
3297 | * rexmit count is also updated if this isn't the first send. | |
3298 | * | |
3299 | * If the send is not successful, make sure R_MUSTRESEND is set. | |
3300 | * If this wasn't the first transmit, set R_RESENDERR. | |
3301 | * Also, undo any UDP congestion window changes made. | |
3302 | * | |
3303 | * If the error appears to indicate that the socket should | |
3304 | * be reconnected, mark the socket for reconnection. | |
3305 | * | |
3306 | * Only return errors when the request should be aborted. | |
3307 | */ | |
int
nfs_send(struct nfsreq *req, int wait)
{
	struct nfsmount *nmp;
	struct nfs_socket *nso;
	int error, error2, sotype, rexmit, slpflag = 0, needrecon;
	struct msghdr msg;
	struct sockaddr *sendnam;
	mbuf_t mreqcopy;
	size_t sentlen = 0;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

again:
	error = nfs_sndlock(req);
	if (error) {
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}

	/* check for request termination before doing any work */
	error = nfs_sigintr(req->r_nmp, req, NULL, 0);
	if (error) {
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}
	nmp = req->r_nmp;
	sotype = nmp->nm_sotype;

	/*
	 * If it's a setup RPC but we're not in SETUP... must need reconnect.
	 * If it's a recovery RPC but the socket's not ready... must need reconnect.
	 */
	if (((req->r_flags & R_SETUP) && !(nmp->nm_sockflags & NMSOCK_SETUP)) ||
	    ((req->r_flags & R_RECOVER) && !(nmp->nm_sockflags & NMSOCK_READY))) {
		error = ETIMEDOUT;
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}

	/* If the socket needs reconnection, do that now. */
	/* wait until socket is ready - unless this request is part of setup */
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_sockflags & NMSOCK_READY) &&
	    !((nmp->nm_sockflags & NMSOCK_SETUP) && (req->r_flags & R_SETUP))) {
		if (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) {
			slpflag |= PCATCH;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		nfs_sndunlock(req);
		if (!wait) {
			/* caller doesn't want to block: mark for resend and bail */
			lck_mtx_lock(&req->r_mtx);
			req->r_flags &= ~R_SENDING;
			req->r_flags |= R_MUSTRESEND;
			req->r_rtt = 0;
			lck_mtx_unlock(&req->r_mtx);
			return 0;
		}
		NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req->r_xid);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		lck_mtx_lock(&nmp->nm_lock);
		while (!(nmp->nm_sockflags & NMSOCK_READY)) {
			/* don't bother waiting if the socket thread won't be reconnecting it */
			if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
				error = EIO;
				break;
			}
			if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (nmp->nm_reconnect_start > 0)) {
				struct timeval now;
				microuptime(&now);
				if ((now.tv_sec - nmp->nm_reconnect_start) >= 8) {
					/* soft mount in reconnect for a while... terminate ASAP */
					OSAddAtomic64(1, &nfsstats.rpctimeouts);
					req->r_flags |= R_SOFTTERM;
					req->r_error = error = ETIMEDOUT;
					break;
				}
			}
			/* make sure socket thread is running, then wait */
			nfs_mount_sock_thread_wake(nmp);
			if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
				break;
			}
			msleep(req, &nmp->nm_lock, slpflag | PSOCK, "nfsconnectwait", &ts);
			slpflag = 0;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		if (error) {
			lck_mtx_lock(&req->r_mtx);
			req->r_error = error;
			req->r_flags &= ~R_SENDING;
			lck_mtx_unlock(&req->r_mtx);
			return error;
		}
		/* socket is ready now; retake the send lock and re-check */
		goto again;
	}
	nso = nmp->nm_nso;
	/* note that we're using the mount's socket to do the send */
	nmp->nm_state |= NFSSTA_SENDING;  /* will be cleared by nfs_sndunlock() */
	lck_mtx_unlock(&nmp->nm_lock);
	if (!nso) {
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_SENDING;
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	lck_mtx_lock(&req->r_mtx);
	rexmit = (req->r_flags & R_SENT);

	if (sotype == SOCK_DGRAM) {
		lck_mtx_lock(&nmp->nm_lock);
		if (!(req->r_flags & R_CWND) && (nmp->nm_sent >= nmp->nm_cwnd)) {
			/* if we can't send this out yet, wait on the cwnd queue */
			slpflag = (NMFLAG(nmp, INTR) && req->r_thread) ? PCATCH : 0;
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_sndunlock(req);
			req->r_flags &= ~R_SENDING;
			req->r_flags |= R_MUSTRESEND;
			lck_mtx_unlock(&req->r_mtx);
			if (!wait) {
				req->r_rtt = 0;
				return 0;
			}
			lck_mtx_lock(&nmp->nm_lock);
			while (nmp->nm_sent >= nmp->nm_cwnd) {
				if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
					break;
				}
				TAILQ_INSERT_TAIL(&nmp->nm_cwndq, req, r_cchain);
				msleep(req, &nmp->nm_lock, slpflag | (PZERO - 1), "nfswaitcwnd", &ts);
				slpflag = 0;
				if ((req->r_cchain.tqe_next != NFSREQNOLIST)) {
					TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
					req->r_cchain.tqe_next = NFSREQNOLIST;
				}
			}
			lck_mtx_unlock(&nmp->nm_lock);
			goto again;
		}
		/*
		 * We update these *before* the send to avoid racing
		 * against others who may be looking to send requests.
		 */
		if (!rexmit) {
			/* first transmit */
			req->r_flags |= R_CWND;
			nmp->nm_sent += NFS_CWNDSCALE;
		} else {
			/*
			 * When retransmitting, turn timing off
			 * and divide congestion window by 2.
			 */
			req->r_flags &= ~R_TIMING;
			nmp->nm_cwnd >>= 1;
			if (nmp->nm_cwnd < NFS_CWNDSCALE) {
				nmp->nm_cwnd = NFS_CWNDSCALE;
			}
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

	req->r_flags &= ~R_MUSTRESEND;
	lck_mtx_unlock(&req->r_mtx);

	/* send a copy so the original request chain survives for retransmits */
	error = mbuf_copym(req->r_mhead, 0, MBUF_COPYALL,
	    wait ? MBUF_WAITOK : MBUF_DONTWAIT, &mreqcopy);
	if (error) {
		if (wait) {
			log(LOG_INFO, "nfs_send: mbuf copy failed %d\n", error);
		}
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_SENDING;
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	bzero(&msg, sizeof(msg));
	/* unconnected non-stream sockets need an explicit destination address */
	if ((sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so) && ((sendnam = nmp->nm_saddr))) {
		msg.msg_name = (caddr_t)sendnam;
		msg.msg_namelen = sendnam->sa_len;
	}
	NFS_SOCK_DUMP_MBUF("Sending mbuf\n", mreqcopy);
	error = sock_sendmbuf(nso->nso_so, &msg, mreqcopy, 0, &sentlen);
	if (error || (sentlen != req->r_mreqlen)) {
		NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
		    req->r_xid, (int)sentlen, (int)req->r_mreqlen, error);
	}

	if (!error && (sentlen != req->r_mreqlen)) {
		error = EWOULDBLOCK;
	}
	/* a partial send on a stream socket corrupts record framing: must reconnect */
	needrecon = ((sotype == SOCK_STREAM) && sentlen && (sentlen != req->r_mreqlen));

	lck_mtx_lock(&req->r_mtx);
	req->r_flags &= ~R_SENDING;
	req->r_rtt = 0;
	if (rexmit && (++req->r_rexmit > NFS_MAXREXMIT)) {
		req->r_rexmit = NFS_MAXREXMIT;
	}

	if (!error) {
		/* SUCCESS */
		req->r_flags &= ~R_RESENDERR;
		if (rexmit) {
			OSAddAtomic64(1, &nfsstats.rpcretries);
		}
		req->r_flags |= R_SENT;
		if (req->r_flags & R_WAITSENT) {
			req->r_flags &= ~R_WAITSENT;
			wakeup(req);
		}
		nfs_sndunlock(req);
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	/* send failed */
	req->r_flags |= R_MUSTRESEND;
	if (rexmit) {
		req->r_flags |= R_RESENDERR;
	}
	if ((error == EINTR) || (error == ERESTART)) {
		req->r_error = error;
	}
	lck_mtx_unlock(&req->r_mtx);

	if (sotype == SOCK_DGRAM) {
		/*
		 * Note: even though a first send may fail, we consider
		 * the request sent for congestion window purposes.
		 * So we don't need to undo any of the changes made above.
		 */
		/*
		 * Socket errors ignored for connectionless sockets??
		 * For now, ignore them all
		 */
		if ((error != EINTR) && (error != ERESTART) &&
		    (error != EWOULDBLOCK) && (error != EIO) && (nso == nmp->nm_nso)) {
			int clearerror = 0, optlen = sizeof(clearerror);
			/* read SO_ERROR to clear the pending error on the socket */
			sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
#ifdef NFS_SOCKET_DEBUGGING
			if (clearerror) {
				NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
				    error, clearerror);
			}
#endif
		}
	}

	/* check if it appears we should reconnect the socket */
	switch (error) {
	case EWOULDBLOCK:
		/* if send timed out, reconnect if on TCP */
		if (sotype != SOCK_STREAM) {
			break;
		}
	/* FALLTHROUGH -- TCP send timeout is treated like a connection error */
	case EPIPE:
	case EADDRNOTAVAIL:
	case ENETDOWN:
	case ENETUNREACH:
	case ENETRESET:
	case ECONNABORTED:
	case ECONNRESET:
	case ENOTCONN:
	case ESHUTDOWN:
	case ECONNREFUSED:
	case EHOSTDOWN:
	case EHOSTUNREACH:
	/* case ECANCELED??? */
		needrecon = 1;
		break;
	}
	if (needrecon && (nso == nmp->nm_nso)) { /* mark socket as needing reconnect */
		NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req->r_xid, error);
		nfs_need_reconnect(nmp);
	}

	nfs_sndunlock(req);

	if (nfs_is_dead(error, nmp)) {
		error = EIO;
	}

	/*
	 * Don't log some errors:
	 * EPIPE errors may be common with servers that drop idle connections.
	 * EADDRNOTAVAIL may occur on network transitions.
	 * ENOTCONN may occur under some network conditions.
	 */
	if ((error == EPIPE) || (error == EADDRNOTAVAIL) || (error == ENOTCONN)) {
		error = 0;
	}
	if (error && (error != EINTR) && (error != ERESTART)) {
		log(LOG_INFO, "nfs send error %d for server %s\n", error,
		    !req->r_nmp ? "<unmounted>" :
		    vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname);
	}

	/* prefer request termination error over other errors */
	error2 = nfs_sigintr(req->r_nmp, req, req->r_thread, 0);
	if (error2) {
		error = error2;
	}

	/* only allow the following errors to be returned */
	if ((error != EINTR) && (error != ERESTART) && (error != EIO) &&
	    (error != ENXIO) && (error != ETIMEDOUT)) {
		/*
		 * We got some error we don't know what to do with,
		 * i.e., we're not reconnecting, we map it to
		 * EIO. Presumably our send failed and we better tell
		 * the caller so they don't wait for a reply that is
		 * never going to come.  If we are reconnecting we
		 * return 0 and the request will be resent.
		 */
		error = needrecon ? 0 : EIO;
	}
	return error;
}
3646 | ||
3647 | /* | |
3648 | * NFS client socket upcalls | |
3649 | * | |
3650 | * Pull RPC replies out of an NFS mount's socket and match them | |
3651 | * up with the pending request. | |
3652 | * | |
3653 | * The datagram code is simple because we always get whole | |
3654 | * messages out of the socket. | |
3655 | * | |
3656 | * The stream code is more involved because we have to parse | |
3657 | * the RPC records out of the stream. | |
3658 | */ | |
3659 | ||
3660 | /* NFS client UDP socket upcall */ | |
3661 | void | |
3662 | nfs_udp_rcv(socket_t so, void *arg, __unused int waitflag) | |
3663 | { | |
3664 | struct nfsmount *nmp = arg; | |
3665 | struct nfs_socket *nso = nmp->nm_nso; | |
3666 | size_t rcvlen; | |
3667 | mbuf_t m; | |
3668 | int error = 0; | |
3669 | ||
3670 | if (nmp->nm_sockflags & NMSOCK_CONNECTING) { | |
3671 | return; | |
3672 | } | |
3673 | ||
3674 | do { | |
3675 | /* make sure we're on the current socket */ | |
3676 | if (!nso || (nso->nso_so != so)) { | |
3677 | return; | |
3678 | } | |
3679 | ||
3680 | m = NULL; | |
3681 | rcvlen = 1000000; | |
3682 | error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen); | |
3683 | if (m) { | |
3684 | nfs_request_match_reply(nmp, m); | |
3685 | } | |
3686 | } while (m && !error); | |
3687 | ||
3688 | if (error && (error != EWOULDBLOCK)) { | |
3689 | /* problems with the socket... mark for reconnection */ | |
3690 | NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error); | |
3691 | nfs_need_reconnect(nmp); | |
3692 | } | |
3693 | } | |
3694 | ||
3695 | /* NFS client TCP socket upcall */ | |
void
nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfsmount *nmp = arg;
	struct nfs_socket *nso = nmp->nm_nso;
	struct nfs_rpc_record_state nrrs;
	mbuf_t m;
	int error = 0;
	int recv = 1;
	int wup = 0;

	if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
		return;
	}

	/* make sure we're on the current socket */
	lck_mtx_lock(&nmp->nm_lock);
	nso = nmp->nm_nso;
	if (!nso || (nso->nso_so != so) || (nmp->nm_sockflags & (NMSOCK_DISCONNECTING))) {
		lck_mtx_unlock(&nmp->nm_lock);
		return;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* make sure this upcall should be trying to do work */
	lck_mtx_lock(&nso->nso_lock);
	if (nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) {
		lck_mtx_unlock(&nso->nso_lock);
		return;
	}
	/* claim the upcall; take a private copy of the parsing state so we
	 * can work on it without holding nso_lock */
	nso->nso_flags |= NSO_UPCALL;
	nrrs = nso->nso_rrs;
	lck_mtx_unlock(&nso->nso_lock);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &nrrs, MSG_DONTWAIT, &recv, &m);
		if (m) { /* match completed response with request */
			nfs_request_match_reply(nmp, m);
		}
	}

	/* Update the socket's RPC parsing state */
	lck_mtx_lock(&nso->nso_lock);
	nso->nso_rrs = nrrs;
	if (nso->nso_flags & NSO_DISCONNECTING) {
		/* a disconnect is waiting on this upcall to finish */
		wup = 1;
	}
	nso->nso_flags &= ~NSO_UPCALL;
	lck_mtx_unlock(&nso->nso_lock);
	if (wup) {
		wakeup(&nso->nso_flags);
	}

#ifdef NFS_SOCKET_DEBUGGING
	if (!recv && (error != EWOULDBLOCK)) {
		NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error);
	}
#endif
	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/* problems with the socket... mark for reconnection */
		NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error);
		nfs_need_reconnect(nmp);
	}
}
3762 | ||
3763 | /* | |
3764 | * "poke" a socket to try to provoke any pending errors | |
3765 | */ | |
3766 | void | |
3767 | nfs_sock_poke(struct nfsmount *nmp) | |
3768 | { | |
3769 | struct iovec aio; | |
3770 | struct msghdr msg; | |
3771 | size_t len; | |
3772 | int error = 0; | |
3773 | int dummy; | |
3774 | ||
3775 | lck_mtx_lock(&nmp->nm_lock); | |
3776 | if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) || | |
3777 | !(nmp->nm_sockflags & NMSOCK_READY) || !nmp->nm_nso || !nmp->nm_nso->nso_so) { | |
3778 | /* Nothing to poke */ | |
3779 | nmp->nm_sockflags &= ~NMSOCK_POKE; | |
3780 | wakeup(&nmp->nm_sockflags); | |
3781 | lck_mtx_unlock(&nmp->nm_lock); | |
3782 | return; | |
3783 | } | |
3784 | lck_mtx_unlock(&nmp->nm_lock); | |
3785 | aio.iov_base = &dummy; | |
3786 | aio.iov_len = 0; | |
3787 | len = 0; | |
3788 | bzero(&msg, sizeof(msg)); | |
3789 | msg.msg_iov = &aio; | |
3790 | msg.msg_iovlen = 1; | |
3791 | error = sock_send(nmp->nm_nso->nso_so, &msg, MSG_DONTWAIT, &len); | |
3792 | NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error); | |
3793 | lck_mtx_lock(&nmp->nm_lock); | |
3794 | nmp->nm_sockflags &= ~NMSOCK_POKE; | |
3795 | wakeup(&nmp->nm_sockflags); | |
3796 | lck_mtx_unlock(&nmp->nm_lock); | |
3797 | nfs_is_dead(error, nmp); | |
3798 | } | |
3799 | ||
3800 | /* | |
3801 | * Match an RPC reply with the corresponding request | |
3802 | */ | |
void
nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
{
	struct nfsreq *req;
	struct nfsm_chain nmrep;
	u_int32_t reply = 0, rxid = 0;
	int error = 0, asyncioq, t1;

	/* Get the xid and check that it is an rpc reply */
	nfsm_chain_dissect_init(error, &nmrep, mrep);
	nfsm_chain_get_32(error, &nmrep, rxid);
	nfsm_chain_get_32(error, &nmrep, reply);
	if (error || (reply != RPC_REPLY)) {
		OSAddAtomic64(1, &nfsstats.rpcinvalid);
		mbuf_freem(mrep);
		return;
	}

	/*
	 * Loop through the request list to match up the reply
	 * If no match, just drop it.
	 */
	lck_mtx_lock(nfs_request_mutex);
	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
			continue;
		}
		/* looks like we have it, grab lock and double check */
		lck_mtx_lock(&req->r_mtx);
		if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
			lck_mtx_unlock(&req->r_mtx);
			continue;
		}
		/* Found it.. */
		req->r_nmrep = nmrep;
		lck_mtx_lock(&nmp->nm_lock);
		if (nmp->nm_sotype == SOCK_DGRAM) {
			/*
			 * Update congestion window.
			 * Do the additive increase of one rpc/rtt.
			 */
			FSDBG(530, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
			if (nmp->nm_cwnd <= nmp->nm_sent) {
				nmp->nm_cwnd +=
				    ((NFS_CWNDSCALE * NFS_CWNDSCALE) +
				    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
				if (nmp->nm_cwnd > NFS_MAXCWND) {
					nmp->nm_cwnd = NFS_MAXCWND;
				}
			}
			if (req->r_flags & R_CWND) {
				/* this request's slot in the window is now free */
				nmp->nm_sent -= NFS_CWNDSCALE;
				req->r_flags &= ~R_CWND;
			}
			if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
				/* congestion window is open, poke the cwnd queue */
				struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
				TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
				req2->r_cchain.tqe_next = NFSREQNOLIST;
				wakeup(req2);
			}
		}
		/*
		 * Update rtt using a gain of 0.125 on the mean
		 * and a gain of 0.25 on the deviation.
		 */
		if (req->r_flags & R_TIMING) {
			/*
			 * Since the timer resolution of
			 * NFS_HZ is so coarse, it can often
			 * result in r_rtt == 0. Since
			 * r_rtt == N means that the actual
			 * rtt is between N+dt and N+2-dt ticks,
			 * add 1.
			 */
			if (proct[req->r_procnum] == 0) {
				panic("nfs_request_match_reply: proct[%d] is zero", req->r_procnum);
			}
			t1 = req->r_rtt + 1;
			t1 -= (NFS_SRTT(req) >> 3);
			NFS_SRTT(req) += t1;
			if (t1 < 0) {
				t1 = -t1;
			}
			t1 -= (NFS_SDRTT(req) >> 2);
			NFS_SDRTT(req) += t1;
		}
		nmp->nm_timeouts = 0;
		lck_mtx_unlock(&nmp->nm_lock);
		/* signal anyone waiting on this request */
		wakeup(req);
		asyncioq = (req->r_callback.rcb_func != NULL);
#if CONFIG_NFS_GSS
		if (nfs_request_using_gss(req)) {
			nfs_gss_clnt_rpcdone(req);
		}
#endif /* CONFIG_NFS_GSS */
		lck_mtx_unlock(&req->r_mtx);
		lck_mtx_unlock(nfs_request_mutex);
		/* if it's an async RPC with a callback, queue it up */
		if (asyncioq) {
			nfs_asyncio_finish(req);
		}
		break;
	}

	if (!req) {
		/* not matched to a request, so drop it. */
		lck_mtx_unlock(nfs_request_mutex);
		OSAddAtomic64(1, &nfsstats.rpcunexpected);
		mbuf_freem(mrep);
	}
}
3916 | ||
3917 | /* | |
3918 | * Wait for the reply for a given request... | |
3919 | * ...potentially resending the request if necessary. | |
3920 | */ | |
int
nfs_wait_reply(struct nfsreq *req)
{
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int error = 0, slpflag, first = 1;

	/* allow the sleep to be interrupted on intr mounts (unless R_NOINTR) */
	if (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
		slpflag = PCATCH;
	} else {
		slpflag = 0;
	}

	lck_mtx_lock(&req->r_mtx);
	while (!req->r_nmrep.nmc_mhead) {
		if ((error = nfs_sigintr(req->r_nmp, req, first ? NULL : req->r_thread, 0))) {
			break;
		}
		if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
			break;
		}
		/* check if we need to resend */
		if (req->r_flags & R_MUSTRESEND) {
			NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
			req->r_flags |= R_SENDING;
			lck_mtx_unlock(&req->r_mtx);
			if (nfs_request_using_gss(req)) {
				/*
				 * It's an RPCSEC_GSS request.
				 * Can't just resend the original request
				 * without bumping the cred sequence number.
				 * Go back and re-build the request.
				 */
				lck_mtx_lock(&req->r_mtx);
				req->r_flags &= ~R_SENDING;
				lck_mtx_unlock(&req->r_mtx);
				return EAGAIN;
			}
			error = nfs_send(req, 1);
			lck_mtx_lock(&req->r_mtx);
			NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, error);
			if (error) {
				break;
			}
			/* the reply may have arrived while the lock was dropped */
			if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
				break;
			}
		}
		/* need to poll if we're P_NOREMOTEHANG */
		if (nfs_noremotehang(req->r_thread)) {
			ts.tv_sec = 1;
		}
		msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitreply", &ts);
		first = slpflag = 0;
	}
	lck_mtx_unlock(&req->r_mtx);

	return error;
}
3981 | ||
3982 | /* | |
3983 | * An NFS request goes something like this: | |
3984 | * (nb: always frees up mreq mbuf list) | |
3985 | * nfs_request_create() | |
3986 | * - allocates a request struct if one is not provided | |
3987 | * - initial fill-in of the request struct | |
3988 | * nfs_request_add_header() | |
3989 | * - add the RPC header | |
3990 | * nfs_request_send() | |
3991 | * - link it into list | |
3992 | * - call nfs_send() for first transmit | |
3993 | * nfs_request_wait() | |
3994 | * - call nfs_wait_reply() to wait for the reply | |
3995 | * nfs_request_finish() | |
3996 | * - break down rpc header and return with error or nfs reply | |
3997 | * pointed to by nmrep. | |
3998 | * nfs_request_rele() | |
3999 | * nfs_request_destroy() | |
4000 | * - clean up the request struct | |
4001 | * - free the request struct if it was allocated by nfs_request_create() | |
4002 | */ | |
4003 | ||
4004 | /* | |
4005 | * Set up an NFS request struct (allocating if no request passed in). | |
4006 | */ | |
int
nfs_request_create(
	nfsnode_t np,
	mount_t mp,     /* used only if !np */
	struct nfsm_chain *nmrest,
	int procnum,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq **reqp)
{
	struct nfsreq *req, *newreq = NULL;
	struct nfsmount *nmp;

	req = *reqp;
	if (!req) {
		/* allocate a new NFS request structure */
		MALLOC_ZONE(newreq, struct nfsreq*, sizeof(*newreq), M_NFSREQ, M_WAITOK);
		if (!newreq) {
			/* always frees up mreq mbuf list, even on failure */
			mbuf_freem(nmrest->nmc_mhead);
			nmrest->nmc_mhead = NULL;
			return ENOMEM;
		}
		req = newreq;
	}

	bzero(req, sizeof(*req));
	if (req == newreq) {
		/* remember we allocated it so nfs_request_destroy() frees it */
		req->r_flags = R_ALLOCATED;
	}

	nmp = VFSTONFS(np ? NFSTOMP(np) : mp);
	if (nfs_mount_gone(nmp)) {
		if (newreq) {
			FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
		}
		return ENXIO;
	}
	lck_mtx_lock(&nmp->nm_lock);
	if ((nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
	    (nmp->nm_state & NFSSTA_TIMEO)) {
		/* mount is being force-unmounted or is dead and timed out */
		lck_mtx_unlock(&nmp->nm_lock);
		mbuf_freem(nmrest->nmc_mhead);
		nmrest->nmc_mhead = NULL;
		if (newreq) {
			FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
		}
		return ENXIO;
	}

	if ((nmp->nm_vers != NFS_VER4) && (procnum >= 0) && (procnum < NFS_NPROCS)) {
		OSAddAtomic64(1, &nfsstats.rpccnt[procnum]);
	}
	if ((nmp->nm_vers == NFS_VER4) && (procnum != NFSPROC4_COMPOUND) && (procnum != NFSPROC4_NULL)) {
		panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum);
	}

	lck_mtx_init(&req->r_mtx, nfs_request_grp, LCK_ATTR_NULL);
	req->r_nmp = nmp;
	nmp->nm_ref++;
	req->r_np = np;
	req->r_thread = thd;
	if (!thd) {
		/* no thread context: the request must not be interruptible */
		req->r_flags |= R_NOINTR;
	}
	if (IS_VALID_CRED(cred)) {
		kauth_cred_ref(cred);
		req->r_cred = cred;
	}
	req->r_procnum = procnum;
	if (proct[procnum] > 0) {
		/* this procedure participates in RTT timing */
		req->r_flags |= R_TIMING;
	}
	req->r_nmrep.nmc_mhead = NULL;
	SLIST_INIT(&req->r_gss_seqlist);
	req->r_achain.tqe_next = NFSREQNOLIST;
	req->r_rchain.tqe_next = NFSREQNOLIST;
	req->r_cchain.tqe_next = NFSREQNOLIST;

	/* set auth flavor to use for request */
	if (!req->r_cred) {
		req->r_auth = RPCAUTH_NONE;
	} else if (req->r_np && (req->r_np->n_auth != RPCAUTH_INVALID)) {
		req->r_auth = req->r_np->n_auth;
	} else {
		req->r_auth = nmp->nm_auth;
	}

	lck_mtx_unlock(&nmp->nm_lock);

	/* move the request mbuf chain to the nfsreq */
	req->r_mrest = nmrest->nmc_mhead;
	nmrest->nmc_mhead = NULL;

	req->r_flags |= R_INITTED;
	req->r_refs = 1;
	if (newreq) {
		*reqp = req;
	}
	return 0;
}
4107 | ||
/*
 * Clean up and free an NFS request structure.
 *
 * Tears down everything nfs_request_create() set up: dequeues the request
 * from the outstanding queue (RL_QUEUED), the async iod queue, the resend
 * queue and the congestion-window queue; returns its congestion-window
 * allotment (R_CWND) and wakes the next throttled request; drops jukebox
 * accounting (R_JBTPRINTFMSG); frees the request and reply mbuf chains,
 * the credential reference, GSS per-request state, and the WRONGSEC
 * flavor array; releases the mount reference; and finally frees the
 * nfsreq itself if it was heap-allocated (R_ALLOCATED).
 *
 * No-op if 'req' is NULL or was never initialized (R_INITTED clear).
 */
void
nfs_request_destroy(struct nfsreq *req)
{
	struct nfsmount *nmp;
	int clearjbtimeo = 0;

#if CONFIG_NFS_GSS
	struct gss_seq *gsp, *ngsp;
#endif

	if (!req || !(req->r_flags & R_INITTED)) {
		return;
	}
	nmp = req->r_nmp;
	req->r_flags &= ~R_INITTED;
	if (req->r_lflags & RL_QUEUED) {
		nfs_reqdequeue(req);
	}

	if (req->r_achain.tqe_next != NFSREQNOLIST) {
		/*
		 * Still on an async I/O queue?
		 * %%% But which one, we may be on a local iod.
		 */
		lck_mtx_lock(nfsiod_mutex);
		/* re-check under the lock; another thread may have dequeued us */
		if (nmp && req->r_achain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_iodq, req, r_achain);
			req->r_achain.tqe_next = NFSREQNOLIST;
		}
		lck_mtx_unlock(nfsiod_mutex);
	}

	lck_mtx_lock(&req->r_mtx);
	if (nmp) {
		lck_mtx_lock(&nmp->nm_lock);
		if (req->r_flags & R_CWND) {
			/* Decrement the outstanding request count. */
			req->r_flags &= ~R_CWND;
			nmp->nm_sent -= NFS_CWNDSCALE;
			if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
				/* congestion window is open, poke the cwnd queue */
				struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
				TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
				req2->r_cchain.tqe_next = NFSREQNOLIST;
				wakeup(req2);
			}
		}
		assert((req->r_flags & R_RESENDQ) == 0);
		/* XXX should we just remove this conditional, we should have a reference if we're resending */
		if (req->r_rchain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
			req->r_rchain.tqe_next = NFSREQNOLIST;
			if (req->r_flags & R_RESENDQ) {
				req->r_flags &= ~R_RESENDQ;
			}
		}
		if (req->r_cchain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
			req->r_cchain.tqe_next = NFSREQNOLIST;
		}
		if (req->r_flags & R_JBTPRINTFMSG) {
			req->r_flags &= ~R_JBTPRINTFMSG;
			nmp->nm_jbreqs--;
			/* last jukebox-delayed request? then clear the jukebox-timeout state */
			clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}
	lck_mtx_unlock(&req->r_mtx);

	if (clearjbtimeo) {
		nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
	}
	/* r_mhead includes r_mrest when a header has been built, so free only one chain */
	if (req->r_mhead) {
		mbuf_freem(req->r_mhead);
	} else if (req->r_mrest) {
		mbuf_freem(req->r_mrest);
	}
	if (req->r_nmrep.nmc_mhead) {
		mbuf_freem(req->r_nmrep.nmc_mhead);
	}
	if (IS_VALID_CRED(req->r_cred)) {
		kauth_cred_unref(&req->r_cred);
	}
#if CONFIG_NFS_GSS
	if (nfs_request_using_gss(req)) {
		nfs_gss_clnt_rpcdone(req);
	}
	SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp)
	FREE(gsp, M_TEMP);
	if (req->r_gss_ctx) {
		nfs_gss_clnt_ctx_unref(req);
	}
#endif /* CONFIG_NFS_GSS */
	if (req->r_wrongsec) {
		FREE(req->r_wrongsec, M_TEMP);
	}
	if (nmp) {
		nfs_mount_rele(nmp);
	}
	lck_mtx_destroy(&req->r_mtx, nfs_request_grp);
	if (req->r_flags & R_ALLOCATED) {
		FREE_ZONE(req, sizeof(*req), M_NFSREQ);
	}
}
4215 | ||
4216 | void | |
4217 | nfs_request_ref(struct nfsreq *req, int locked) | |
4218 | { | |
4219 | if (!locked) { | |
4220 | lck_mtx_lock(&req->r_mtx); | |
4221 | } | |
4222 | if (req->r_refs <= 0) { | |
4223 | panic("nfsreq reference error"); | |
4224 | } | |
4225 | req->r_refs++; | |
4226 | if (!locked) { | |
4227 | lck_mtx_unlock(&req->r_mtx); | |
4228 | } | |
4229 | } | |
4230 | ||
4231 | void | |
4232 | nfs_request_rele(struct nfsreq *req) | |
4233 | { | |
4234 | int destroy; | |
4235 | ||
4236 | lck_mtx_lock(&req->r_mtx); | |
4237 | if (req->r_refs <= 0) { | |
4238 | panic("nfsreq reference underflow"); | |
4239 | } | |
4240 | req->r_refs--; | |
4241 | destroy = (req->r_refs == 0); | |
4242 | lck_mtx_unlock(&req->r_mtx); | |
4243 | if (destroy) { | |
4244 | nfs_request_destroy(req); | |
4245 | } | |
4246 | } | |
4247 | ||
4248 | ||
/*
 * Add an (updated) RPC header with authorization to an NFS request.
 *
 * Any previously built header is stripped first by freeing the header
 * mbufs one at a time until the start of the argument chain (r_mrest)
 * is reached; nfsm_rpchead() then prepends a fresh header and assigns
 * the XID.  The retransmit limit (r_retry) is also (re)computed from
 * the mount's soft/hard setting.
 *
 * Returns 0 on success, ENXIO if the mount is gone, or an error from
 * nfsm_rpchead().
 */
int
nfs_request_add_header(struct nfsreq *req)
{
	struct nfsmount *nmp;
	int error = 0;
	mbuf_t m;

	/* free up any previous header */
	if ((m = req->r_mhead)) {
		/* free header mbufs individually, stopping at the argument mbufs */
		while (m && (m != req->r_mrest)) {
			m = mbuf_free(m);
		}
		req->r_mhead = NULL;
	}

	nmp = req->r_nmp;
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	error = nfsm_rpchead(req, req->r_mrest, &req->r_xid, &req->r_mhead);
	if (error) {
		return error;
	}

	req->r_mreqlen = mbuf_pkthdr_len(req->r_mhead);
	/* NOTE(review): r_nmp is re-read here, presumably because nfsm_rpchead() can block — confirm */
	nmp = req->r_nmp;
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	lck_mtx_lock(&nmp->nm_lock);
	if (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) {
		req->r_retry = nmp->nm_retry;
	} else {
		req->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
	}
	lck_mtx_unlock(&nmp->nm_lock);

	return error;
}
4292 | ||
4293 | ||
/*
 * Queue an NFS request up and send it out.
 *
 * Marks the request R_SENDING, stamps its start time on first send,
 * chains it onto the global outstanding-request queue (nfs_reqq) so the
 * request timer can find it, starts the timer if it isn't running, and
 * then hands the request to nfs_send().  'wait' is passed through to
 * nfs_send().
 *
 * Returns ENXIO if the mount is gone, otherwise nfs_send()'s result.
 */
int
nfs_request_send(struct nfsreq *req, int wait)
{
	struct nfsmount *nmp;
	struct timeval now;

	lck_mtx_lock(&req->r_mtx);
	req->r_flags |= R_SENDING;
	lck_mtx_unlock(&req->r_mtx);

	lck_mtx_lock(nfs_request_mutex);

	nmp = req->r_nmp;
	if (nfs_mount_gone(nmp)) {
		lck_mtx_unlock(nfs_request_mutex);
		return ENXIO;
	}

	microuptime(&now);
	if (!req->r_start) {
		req->r_start = now.tv_sec;
		/*
		 * NOTE(review): r_lastmsg is back-dated by the difference between
		 * the normal and initial tprintf delays — presumably so the first
		 * "not responding" message honors the initial delay; confirm.
		 */
		req->r_lastmsg = now.tv_sec -
		    ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
	}

	OSAddAtomic64(1, &nfsstats.rpcrequests);

	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.
	 * Make sure that the request queue timer is running
	 * to check for possible request timeout.
	 */
	TAILQ_INSERT_TAIL(&nfs_reqq, req, r_chain);
	req->r_lflags |= RL_QUEUED;
	if (!nfs_request_timer_on) {
		nfs_request_timer_on = 1;
		nfs_interval_timer_start(nfs_request_timer_call,
		    NFS_REQUESTDELAY);
	}
	lck_mtx_unlock(nfs_request_mutex);

	/* Send the request... */
	return nfs_send(req, wait);
}
4342 | ||
4343 | /* | |
4344 | * Call nfs_wait_reply() to wait for the reply. | |
4345 | */ | |
4346 | void | |
4347 | nfs_request_wait(struct nfsreq *req) | |
4348 | { | |
4349 | req->r_error = nfs_wait_reply(req); | |
4350 | } | |
4351 | ||
/*
 * Finish up an NFS request by dequeueing it and
 * doing the initial NFS request reply processing.
 *
 * Parses the RPC reply header (reply status, reject/auth status,
 * verifier, accepted status) and then the NFS status.  Several outcomes
 * mark the request R_RESTART so the caller's retry loop resends it:
 * GSS context renewal, NFSERR_TRYLATER (jukebox) backoff, and
 * NFSERR_WRONGSEC security-flavor renegotiation (NFSv4).
 *
 * On success the reply chain is handed to '*nmrepp' and the NFS status
 * stored in '*status'.  Returns 0 or an errno; RPC-level failures map
 * to ENOTSUP/EACCES/EPROGUNAVAIL/ERPCMISMATCH/EPROCUNAVAIL/EBADRPC/EIO.
 */
int
nfs_request_finish(
	struct nfsreq *req,
	struct nfsm_chain *nmrepp,
	int *status)
{
	struct nfsmount *nmp;
	mbuf_t mrep;
	int verf_type = 0;
	uint32_t verf_len = 0;
	uint32_t reply_status = 0;
	uint32_t rejected_status = 0;
	uint32_t auth_status = 0;
	uint32_t accepted_status = 0;
	struct nfsm_chain nmrep;
	int error, clearjbtimeo;

	error = req->r_error;

	if (nmrepp) {
		nmrepp->nmc_mhead = NULL;
	}

	/* RPC done, unlink the request. */
	nfs_reqdequeue(req);

	mrep = req->r_nmrep.nmc_mhead;

	nmp = req->r_nmp;

	if ((req->r_flags & R_CWND) && nmp) {
		/*
		 * Decrement the outstanding request count.
		 */
		req->r_flags &= ~R_CWND;
		lck_mtx_lock(&nmp->nm_lock);
		FSDBG(273, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
		nmp->nm_sent -= NFS_CWNDSCALE;
		if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
			/* congestion window is open, poke the cwnd queue */
			struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
			TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
			req2->r_cchain.tqe_next = NFSREQNOLIST;
			wakeup(req2);
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

#if CONFIG_NFS_GSS
	if (nfs_request_using_gss(req)) {
		/*
		 * If the request used an RPCSEC_GSS credential
		 * then reset its sequence number bit in the
		 * request window.
		 */
		nfs_gss_clnt_rpcdone(req);

		/*
		 * If we need to re-send, go back and re-build the
		 * request based on a new sequence number.
		 * Note that we're using the original XID.
		 */
		if (error == EAGAIN) {
			req->r_error = 0;
			if (mrep) {
				mbuf_freem(mrep);
			}
			error = nfs_gss_clnt_args_restore(req); // remove any trailer mbufs
			req->r_nmrep.nmc_mhead = NULL;
			req->r_flags |= R_RESTART;
			if (error == ENEEDAUTH) {
				req->r_xid = 0; // get a new XID
				error = 0;
			}
			goto nfsmout;
		}
	}
#endif /* CONFIG_NFS_GSS */

	/*
	 * If there was a successful reply, make sure to mark the mount as up.
	 * If a tprintf message was given (or if this is a timed-out soft mount)
	 * then post a tprintf message indicating the server is alive again.
	 */
	if (!error) {
		if ((req->r_flags & R_TPRINTFMSG) ||
		    (nmp && (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) &&
		    ((nmp->nm_state & (NFSSTA_TIMEO | NFSSTA_FORCE | NFSSTA_DEAD)) == NFSSTA_TIMEO))) {
			nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, "is alive again");
		} else {
			nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, NULL);
		}
	}
	if (!error && !nmp) {
		error = ENXIO;
	}
	nfsmout_if(error);

	/*
	 * break down the RPC header and check if ok
	 */
	nmrep = req->r_nmrep;
	nfsm_chain_get_32(error, &nmrep, reply_status);
	nfsmout_if(error);
	if (reply_status == RPC_MSGDENIED) {
		nfsm_chain_get_32(error, &nmrep, rejected_status);
		nfsmout_if(error);
		if (rejected_status == RPC_MISMATCH) {
			/* server doesn't support our RPC version */
			error = ENOTSUP;
			goto nfsmout;
		}
		nfsm_chain_get_32(error, &nmrep, auth_status);
		nfsmout_if(error);
		switch (auth_status) {
#if CONFIG_NFS_GSS
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			/*
			 * An RPCSEC_GSS cred or context problem.
			 * We can't use it anymore.
			 * Restore the args, renew the context
			 * and set up for a resend.
			 */
			error = nfs_gss_clnt_args_restore(req);
			if (error && error != ENEEDAUTH) {
				break;
			}

			if (!error) {
				error = nfs_gss_clnt_ctx_renew(req);
				if (error) {
					break;
				}
			}
			mbuf_freem(mrep);
			req->r_nmrep.nmc_mhead = NULL;
			req->r_xid = 0; // get a new XID
			req->r_flags |= R_RESTART;
			goto nfsmout;
#endif /* CONFIG_NFS_GSS */
		default:
			error = EACCES;
			break;
		}
		goto nfsmout;
	}

	/* Now check the verifier */
	nfsm_chain_get_32(error, &nmrep, verf_type);    // verifier flavor
	nfsm_chain_get_32(error, &nmrep, verf_len);     // verifier length
	nfsmout_if(error);

	switch (req->r_auth) {
	case RPCAUTH_NONE:
	case RPCAUTH_SYS:
		/* Any AUTH_SYS verifier is ignored */
		if (verf_len > 0) {
			nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
		}
		nfsm_chain_get_32(error, &nmrep, accepted_status);
		break;
#if CONFIG_NFS_GSS
	case RPCAUTH_KRB5:
	case RPCAUTH_KRB5I:
	case RPCAUTH_KRB5P:
		/* GSS verifier must be checked; it also yields the accepted status */
		error = nfs_gss_clnt_verf_get(req, &nmrep,
		    verf_type, verf_len, &accepted_status);
		break;
#endif /* CONFIG_NFS_GSS */
	}
	nfsmout_if(error);

	switch (accepted_status) {
	case RPC_SUCCESS:
		if (req->r_procnum == NFSPROC_NULL) {
			/*
			 * The NFS null procedure is unique,
			 * in not returning an NFS status.
			 */
			*status = NFS_OK;
		} else {
			nfsm_chain_get_32(error, &nmrep, *status);
			nfsmout_if(error);
		}

		if ((nmp->nm_vers != NFS_VER2) && (*status == NFSERR_TRYLATER)) {
			/*
			 * It's a JUKEBOX error - delay and try again
			 */
			int delay, slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;

			mbuf_freem(mrep);
			req->r_nmrep.nmc_mhead = NULL;
			if ((req->r_delay >= 30) && !(nmp->nm_state & NFSSTA_MOUNTED)) {
				/* we're not yet completely mounted and */
				/* we can't complete an RPC, so we fail */
				OSAddAtomic64(1, &nfsstats.rpctimeouts);
				nfs_softterm(req);
				error = req->r_error;
				goto nfsmout;
			}
			/* exponential backoff, capped at 30 seconds */
			req->r_delay = !req->r_delay ? NFS_TRYLATERDEL : (req->r_delay * 2);
			if (req->r_delay > 30) {
				req->r_delay = 30;
			}
			if (nmp->nm_tprintf_initial_delay && (req->r_delay >= nmp->nm_tprintf_initial_delay)) {
				if (!(req->r_flags & R_JBTPRINTFMSG)) {
					req->r_flags |= R_JBTPRINTFMSG;
					lck_mtx_lock(&nmp->nm_lock);
					nmp->nm_jbreqs++;
					lck_mtx_unlock(&nmp->nm_lock);
				}
				nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_JUKEBOXTIMEO,
				    "resource temporarily unavailable (jukebox)", 0);
			}
			if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (req->r_delay == 30) &&
			    !(req->r_flags & R_NOINTR)) {
				/* for soft mounts, just give up after a short while */
				OSAddAtomic64(1, &nfsstats.rpctimeouts);
				nfs_softterm(req);
				error = req->r_error;
				goto nfsmout;
			}
			delay = req->r_delay;
			if (req->r_callback.rcb_func) {
				/* async request: schedule the resend instead of sleeping */
				struct timeval now;
				microuptime(&now);
				req->r_resendtime = now.tv_sec + delay;
			} else {
				do {
					if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
						goto nfsmout;
					}
					tsleep(nfs_request_finish, PSOCK | slpflag, "nfs_jukebox_trylater", hz);
					slpflag = 0;
				} while (--delay > 0);
			}
			req->r_xid = 0;                 // get a new XID
			req->r_flags |= R_RESTART;
			req->r_start = 0;
			FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_TRYLATER);
			return 0;
		}

		if (req->r_flags & R_JBTPRINTFMSG) {
			req->r_flags &= ~R_JBTPRINTFMSG;
			lck_mtx_lock(&nmp->nm_lock);
			nmp->nm_jbreqs--;
			clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_up(nmp, req->r_thread, clearjbtimeo, "resource available again");
		}

#if CONFIG_NFS4
		if ((nmp->nm_vers >= NFS_VER4) && (*status == NFSERR_WRONGSEC)) {
			/*
			 * Hmmm... we need to try a different security flavor.
			 * The first time a request hits this, we will allocate an array
			 * to track flavors to try.  We fill the array with the mount's
			 * preferred flavors or the server's preferred flavors or just the
			 * flavors we support.
			 */
			uint32_t srvflavors[NX_MAX_SEC_FLAVORS];
			int srvcount, i, j;

			/* Call SECINFO to try to get list of flavors from server. */
			srvcount = NX_MAX_SEC_FLAVORS;
			nfs4_secinfo_rpc(nmp, &req->r_secinfo, req->r_cred, srvflavors, &srvcount);

			if (!req->r_wrongsec) {
				/* first time... set up flavor array */
				MALLOC(req->r_wrongsec, uint32_t*, NX_MAX_SEC_FLAVORS * sizeof(uint32_t), M_TEMP, M_WAITOK);
				if (!req->r_wrongsec) {
					error = EACCES;
					goto nfsmout;
				}
				i = 0;
				if (nmp->nm_sec.count) { /* use the mount's preferred list of flavors */
					for (; i < nmp->nm_sec.count; i++) {
						req->r_wrongsec[i] = nmp->nm_sec.flavors[i];
					}
				} else if (srvcount) { /* otherwise use the server's list of flavors */
					for (; i < srvcount; i++) {
						req->r_wrongsec[i] = srvflavors[i];
					}
				} else { /* otherwise, just try the flavors we support. */
					req->r_wrongsec[i++] = RPCAUTH_KRB5P;
					req->r_wrongsec[i++] = RPCAUTH_KRB5I;
					req->r_wrongsec[i++] = RPCAUTH_KRB5;
					req->r_wrongsec[i++] = RPCAUTH_SYS;
					req->r_wrongsec[i++] = RPCAUTH_NONE;
				}
				for (; i < NX_MAX_SEC_FLAVORS; i++) { /* invalidate any remaining slots */
					req->r_wrongsec[i] = RPCAUTH_INVALID;
				}
			}

			/* clear the current flavor from the list */
			for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
				if (req->r_wrongsec[i] == req->r_auth) {
					req->r_wrongsec[i] = RPCAUTH_INVALID;
				}
			}

			/* find the next flavor to try */
			for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
				if (req->r_wrongsec[i] != RPCAUTH_INVALID) {
					if (!srvcount) { /* no server list, just try it */
						break;
					}
					/* check that it's in the server's list */
					for (j = 0; j < srvcount; j++) {
						if (req->r_wrongsec[i] == srvflavors[j]) {
							break;
						}
					}
					if (j < srvcount) { /* found */
						break;
					}
					/* not found in server list */
					req->r_wrongsec[i] = RPCAUTH_INVALID;
				}
			}
			if (i == NX_MAX_SEC_FLAVORS) {
				/* nothing left to try! */
				error = EACCES;
				goto nfsmout;
			}

			/* retry with the next auth flavor */
			req->r_auth = req->r_wrongsec[i];
			req->r_xid = 0;                 // get a new XID
			req->r_flags |= R_RESTART;
			req->r_start = 0;
			FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_WRONGSEC);
			return 0;
		}
		if ((nmp->nm_vers >= NFS_VER4) && req->r_wrongsec) {
			/*
			 * We renegotiated security for this request; so update the
			 * default security flavor for the associated node.
			 */
			if (req->r_np) {
				req->r_np->n_auth = req->r_auth;
			}
		}
#endif /* CONFIG_NFS4 */
		if (*status == NFS_OK) {
			/*
			 * Successful NFS request
			 */
			*nmrepp = nmrep;
			req->r_nmrep.nmc_mhead = NULL;
			break;
		}
		/* Got an NFS error of some kind */

		/*
		 * If the File Handle was stale, invalidate the
		 * lookup cache, just in case.
		 */
		if ((*status == ESTALE) && req->r_np) {
			cache_purge(NFSTOV(req->r_np));
			/* if monitored, also send delete event */
			if (vnode_ismonitored(NFSTOV(req->r_np))) {
				nfs_vnode_notify(req->r_np, (VNODE_EVENT_ATTRIB | VNODE_EVENT_DELETE));
			}
		}
		if (nmp->nm_vers == NFS_VER2) {
			mbuf_freem(mrep);
		} else {
			*nmrepp = nmrep;
		}
		req->r_nmrep.nmc_mhead = NULL;
		error = 0;
		break;
	case RPC_PROGUNAVAIL:
		error = EPROGUNAVAIL;
		break;
	case RPC_PROGMISMATCH:
		error = ERPCMISMATCH;
		break;
	case RPC_PROCUNAVAIL:
		error = EPROCUNAVAIL;
		break;
	case RPC_GARBAGE:
		error = EBADRPC;
		break;
	case RPC_SYSTEM_ERR:
	default:
		error = EIO;
		break;
	}
nfsmout:
	if (req->r_flags & R_JBTPRINTFMSG) {
		req->r_flags &= ~R_JBTPRINTFMSG;
		lck_mtx_lock(&nmp->nm_lock);
		nmp->nm_jbreqs--;
		clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
		lck_mtx_unlock(&nmp->nm_lock);
		if (clearjbtimeo) {
			nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
		}
	}
	FSDBG(273, R_XID32(req->r_xid), nmp, req,
	    (!error && (*status == NFS_OK)) ? 0xf0f0f0f0 : error);
	return error;
}
4764 | ||
4765 | /* | |
4766 | * NFS request using a GSS/Kerberos security flavor? | |
4767 | */ | |
4768 | int | |
4769 | nfs_request_using_gss(struct nfsreq *req) | |
4770 | { | |
4771 | if (!req->r_gss_ctx) { | |
4772 | return 0; | |
4773 | } | |
4774 | switch (req->r_auth) { | |
4775 | case RPCAUTH_KRB5: | |
4776 | case RPCAUTH_KRB5I: | |
4777 | case RPCAUTH_KRB5P: | |
4778 | return 1; | |
4779 | } | |
4780 | return 0; | |
4781 | } | |
4782 | ||
4783 | /* | |
4784 | * Perform an NFS request synchronously. | |
4785 | */ | |
4786 | ||
4787 | int | |
4788 | nfs_request( | |
4789 | nfsnode_t np, | |
4790 | mount_t mp, /* used only if !np */ | |
4791 | struct nfsm_chain *nmrest, | |
4792 | int procnum, | |
4793 | vfs_context_t ctx, | |
4794 | struct nfsreq_secinfo_args *si, | |
4795 | struct nfsm_chain *nmrepp, | |
4796 | u_int64_t *xidp, | |
4797 | int *status) | |
4798 | { | |
4799 | return nfs_request2(np, mp, nmrest, procnum, | |
4800 | vfs_context_thread(ctx), vfs_context_ucred(ctx), | |
4801 | si, 0, nmrepp, xidp, status); | |
4802 | } | |
4803 | ||
/*
 * Perform an NFS request synchronously: create the request, then loop
 * adding the RPC header, sending, waiting for, and processing the reply
 * for as long as the request comes back marked R_RESTART (e.g. GSS
 * context renewal, jukebox retry, or NFSv4 security renegotiation in
 * nfs_request_finish()).
 *
 * 'flags' (R_OPTMASK | R_SOFT bits) are merged into the request flags;
 * 'si', if given, supplies security info for the request; '*xidp', if
 * given, receives the request XID; '*status' receives the NFS status
 * from the reply.  Returns 0 or an errno.
 */
int
nfs_request2(
	nfsnode_t np,
	mount_t mp,                     /* used only if !np */
	struct nfsm_chain *nmrest,
	int procnum,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_secinfo_args *si,
	int flags,
	struct nfsm_chain *nmrepp,
	u_int64_t *xidp,
	int *status)
{
	/* stack-allocated request; nfs_request_create() may still use it (R_ALLOCATED unset) */
	struct nfsreq rq, *req = &rq;
	int error;

	if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) {
		return error;
	}
	req->r_flags |= (flags & (R_OPTMASK | R_SOFT));
	if (si) {
		req->r_secinfo = *si;
	}

	FSDBG_TOP(273, R_XID32(req->r_xid), np, procnum, 0);
	do {
		req->r_error = 0;
		req->r_flags &= ~R_RESTART;
		if ((error = nfs_request_add_header(req))) {
			break;
		}
		if (xidp) {
			*xidp = req->r_xid;
		}
		if ((error = nfs_request_send(req, 1))) {
			break;
		}
		nfs_request_wait(req);
		if ((error = nfs_request_finish(req, nmrepp, status))) {
			break;
		}
	} while (req->r_flags & R_RESTART);

	FSDBG_BOT(273, R_XID32(req->r_xid), np, procnum, error);
	nfs_request_rele(req);
	return error;
}
4852 | ||
4853 | ||
4854 | #if CONFIG_NFS_GSS | |
/*
 * Set up a new null proc request to exchange GSS context tokens with the
 * server. Associate the context that we are setting up with the request that we
 * are sending.
 *
 * 'cp' is the GSS context being set up or renewed; a reference is taken
 * on it for the life of the request.  For a context-destroy advisory
 * (RPCSEC_GSS_DESTROY) we don't wait for a reply, to avoid hanging on a
 * dead server.  Returns NFSERR_EAUTH if no context was supplied,
 * otherwise the usual request errno.
 */

int
nfs_request_gss(
	mount_t mp,
	struct nfsm_chain *nmrest,
	thread_t thd,
	kauth_cred_t cred,
	int flags,
	struct nfs_gss_clnt_ctx *cp,    /* Set to gss context to renew or setup */
	struct nfsm_chain *nmrepp,
	int *status)
{
	struct nfsreq rq, *req = &rq;
	int error, wait = 1;

	if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req))) {
		return error;
	}
	req->r_flags |= (flags & R_OPTMASK);

	if (cp == NULL) {
		printf("nfs_request_gss request has no context\n");
		nfs_request_rele(req);
		return NFSERR_EAUTH;
	}
	nfs_gss_clnt_ctx_ref(req, cp);

	/*
	 * Don't wait for a reply to a context destroy advisory
	 * to avoid hanging on a dead server.
	 */
	if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) {
		wait = 0;
	}

	FSDBG_TOP(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, 0);
	do {
		req->r_error = 0;
		req->r_flags &= ~R_RESTART;
		if ((error = nfs_request_add_header(req))) {
			break;
		}

		if ((error = nfs_request_send(req, wait))) {
			break;
		}
		/* fire-and-forget for the destroy advisory */
		if (!wait) {
			break;
		}

		nfs_request_wait(req);
		if ((error = nfs_request_finish(req, nmrepp, status))) {
			break;
		}
	} while (req->r_flags & R_RESTART);

	FSDBG_BOT(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, error);

	nfs_gss_clnt_ctx_unref(req);
	nfs_request_rele(req);

	return error;
}
4923 | #endif /* CONFIG_NFS_GSS */ | |
4924 | ||
/*
 * Create and start an asynchronous NFS request.
 *
 * Builds the request, marks it R_ASYNC, adds the RPC header, and sends
 * it.  If a callback was supplied (cb->rcb_func), an extra reference is
 * taken for the in-flight request and we wait (R_WAITSENT) until the
 * request has actually been sent before returning; completion is then
 * delivered via the callback (see nfs_request_async_finish()).
 * '*reqp' receives the created request.
 */
int
nfs_request_async(
	nfsnode_t np,
	mount_t mp,                     /* used only if !np */
	struct nfsm_chain *nmrest,
	int procnum,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_secinfo_args *si,
	int flags,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsreq *req;
	struct nfsmount *nmp;
	int error, sent;

	error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, reqp);
	req = *reqp;
	FSDBG(274, (req ? R_XID32(req->r_xid) : 0), np, procnum, error);
	if (error) {
		return error;
	}
	req->r_flags |= (flags & R_OPTMASK);
	req->r_flags |= R_ASYNC;
	if (si) {
		req->r_secinfo = *si;
	}
	if (cb) {
		req->r_callback = *cb;
	}
	error = nfs_request_add_header(req);
	if (!error) {
		req->r_flags |= R_WAITSENT;
		/* extra reference so the callback path can't free the request under us */
		if (req->r_callback.rcb_func) {
			nfs_request_ref(req, 0);
		}
		error = nfs_request_send(req, 1);
		lck_mtx_lock(&req->r_mtx);
		if (!error && !(req->r_flags & R_SENT) && req->r_callback.rcb_func) {
			/* make sure to wait until this async I/O request gets sent */
			int slpflag = (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
			struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
			while (!(req->r_flags & R_SENT)) {
				nmp = req->r_nmp;
				if ((req->r_flags & R_RESENDQ) && !nfs_mount_gone(nmp)) {
					lck_mtx_lock(&nmp->nm_lock);
					if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
						/*
						 * It's not going to get off the resend queue if we're in recovery.
						 * So, just take it off ourselves.  We could be holding mount state
						 * busy and thus holding up the start of recovery.
						 */
						TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
						req->r_rchain.tqe_next = NFSREQNOLIST;
						if (req->r_flags & R_RESENDQ) {
							req->r_flags &= ~R_RESENDQ;
						}
						lck_mtx_unlock(&nmp->nm_lock);
						req->r_flags |= R_SENDING;
						lck_mtx_unlock(&req->r_mtx);
						error = nfs_send(req, 1);
						/* Remove the R_RESENDQ reference */
						nfs_request_rele(req);
						lck_mtx_lock(&req->r_mtx);
						if (error) {
							break;
						}
						continue;
					}
					lck_mtx_unlock(&nmp->nm_lock);
				}
				if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
					break;
				}
				msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitsent", &ts);
				slpflag = 0;
			}
		}
		sent = req->r_flags & R_SENT;
		lck_mtx_unlock(&req->r_mtx);
		/* send failed before it ever went out: drop the callback's extra reference */
		if (error && req->r_callback.rcb_func && !sent) {
			nfs_request_rele(req);
		}
	}
	FSDBG(274, R_XID32(req->r_xid), np, procnum, error);
	if (error || req->r_callback.rcb_func) {
		nfs_request_rele(req);
	}

	return error;
}
5020 | ||
/*
 * Wait for and finish an asynchronous NFS request.
 *
 * Used both by true async requests (r_callback.rcb_func set) and by callers
 * that issued the request asynchronously but now want to wait in-line for
 * the reply.  On success the parsed reply is returned via *nmrepp/*status
 * and (if xidp != NULL) the transaction ID via *xidp.
 *
 * Returns 0 on success, EINPROGRESS if an async request was (re)queued for
 * a later send, or an error.  Always drops one reference on the request
 * via nfs_request_rele() before returning (except on the EINPROGRESS paths,
 * where the request is still in flight).
 */
int
nfs_request_async_finish(
	struct nfsreq *req,
	struct nfsm_chain *nmrepp,
	u_int64_t *xidp,
	int *status)
{
	int error = 0, asyncio = req->r_callback.rcb_func ? 1 : 0;
	struct nfsmount *nmp;

	lck_mtx_lock(&req->r_mtx);
	if (!asyncio) {
		/* a synchronous waiter - let senders know someone is blocked here */
		req->r_flags |= R_ASYNCWAIT;
	}
	while (req->r_flags & R_RESENDQ) {  /* wait until the request is off the resend queue */
		struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

		if ((nmp = req->r_nmp)) {
			lck_mtx_lock(&nmp->nm_lock);
			if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
				/*
				 * It's not going to get off the resend queue if we're in recovery.
				 * So, just take it off ourselves.  We could be holding mount state
				 * busy and thus holding up the start of recovery.
				 */
				TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
				req->r_rchain.tqe_next = NFSREQNOLIST;
				if (req->r_flags & R_RESENDQ) {
					req->r_flags &= ~R_RESENDQ;
				}
				/* Remove the R_RESENDQ reference */
				/* (dropped directly rather than via nfs_request_rele() - we hold r_mtx) */
				assert(req->r_refs > 0);
				req->r_refs--;
				lck_mtx_unlock(&nmp->nm_lock);
				break;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
			break;
		}
		/* poll every 2 seconds in case we miss the wakeup */
		msleep(req, &req->r_mtx, PZERO - 1, "nfsresendqwait", &ts);
	}
	lck_mtx_unlock(&req->r_mtx);

	if (!error) {
		nfs_request_wait(req);
		error = nfs_request_finish(req, nmrepp, status);
	}

	/* R_RESTART means the RPC must be rebuilt and resent (e.g. after reconnect) */
	while (!error && (req->r_flags & R_RESTART)) {
		if (asyncio) {
			assert(req->r_achain.tqe_next == NFSREQNOLIST);
			lck_mtx_lock(&req->r_mtx);
			req->r_flags &= ~R_IOD;
			if (req->r_resendtime) {  /* send later */
				nfs_asyncio_resend(req);
				lck_mtx_unlock(&req->r_mtx);
				return EINPROGRESS;
			}
			lck_mtx_unlock(&req->r_mtx);
		}
		req->r_error = 0;
		req->r_flags &= ~R_RESTART;
		if ((error = nfs_request_add_header(req))) {
			break;
		}
		/* wait for the send only when we're going to wait for the reply too */
		if ((error = nfs_request_send(req, !asyncio))) {
			break;
		}
		if (asyncio) {
			/* async request is back in flight; callback will finish it */
			return EINPROGRESS;
		}
		nfs_request_wait(req);
		if ((error = nfs_request_finish(req, nmrepp, status))) {
			break;
		}
	}
	if (xidp) {
		*xidp = req->r_xid;
	}

	FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, error);
	nfs_request_rele(req);
	return error;
}
5110 | ||
/*
 * Cancel a pending asynchronous NFS request.
 *
 * Simply drops the caller's reference on the request; the 0xD1ED1E
 * marker in the FSDBG trace identifies the cancellation.
 */
void
nfs_request_async_cancel(struct nfsreq *req)
{
	FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, 0xD1ED1E);
	nfs_request_rele(req);
}
5120 | ||
/*
 * Flag a request as being terminated.
 *
 * Marks the request R_SOFTTERM and records ETIMEDOUT as its error so that
 * waiters (see nfs_sigintr()) give up on it.  If the request was holding
 * congestion-window space (R_CWND), return that space to the mount and
 * wake the first request waiting on the cwnd queue so it can proceed.
 */
void
nfs_softterm(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	req->r_flags |= R_SOFTTERM;
	req->r_error = ETIMEDOUT;
	if (!(req->r_flags & R_CWND) || nfs_mount_gone(nmp)) {
		/* not holding cwnd space, or the mount is gone - nothing to give back */
		return;
	}
	/* update congestion window */
	req->r_flags &= ~R_CWND;
	lck_mtx_lock(&nmp->nm_lock);
	FSDBG(532, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
	nmp->nm_sent -= NFS_CWNDSCALE;
	if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
		/* congestion window is open, poke the cwnd queue */
		struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
		TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
		req2->r_cchain.tqe_next = NFSREQNOLIST;
		wakeup(req2);
	}
	lck_mtx_unlock(&nmp->nm_lock);
}
5147 | ||
/*
 * Ensure req isn't in use by the timer, then dequeue it.
 *
 * Waits (under nfs_request_mutex) for any RL_BUSY holder - the timer in
 * nfs_request_timer() busies requests while walking nfs_reqq - to release
 * the request before removing it from the global request queue.
 */
void
nfs_reqdequeue(struct nfsreq *req)
{
	lck_mtx_lock(nfs_request_mutex);
	while (req->r_lflags & RL_BUSY) {
		/* note that we're waiting; the busy holder does wakeup(&req->r_lflags) */
		req->r_lflags |= RL_WAITING;
		msleep(&req->r_lflags, nfs_request_mutex, PSOCK, "reqdeq", NULL);
	}
	if (req->r_lflags & RL_QUEUED) {
		TAILQ_REMOVE(&nfs_reqq, req, r_chain);
		req->r_lflags &= ~RL_QUEUED;
	}
	lck_mtx_unlock(nfs_request_mutex);
}
5165 | ||
/*
 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
 * free()'d out from under it.
 *
 * NOTE(review): callers in this file hold nfs_request_mutex around the
 * RL_BUSY manipulation - confirm before adding new callers.
 */
void
nfs_reqbusy(struct nfsreq *req)
{
	if (req->r_lflags & RL_BUSY) {
		/* double-busy indicates a bookkeeping bug - fail loudly */
		panic("req locked");
	}
	req->r_lflags |= RL_BUSY;
}
5178 | ||
/*
 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
 *
 * Iteration helper for nfs_request_timer(): advances along nfs_reqq while
 * keeping exactly one request RL_BUSY at a time.  Returns NULL at the end
 * of the queue (or if req is NULL).
 */
struct nfsreq *
nfs_reqnext(struct nfsreq *req)
{
	struct nfsreq * nextreq;

	if (req == NULL) {
		return NULL;
	}
	/*
	 * We need to get and busy the next req before signalling the
	 * current one, otherwise wakeup() may block us and we'll race to
	 * grab the next req.
	 */
	nextreq = TAILQ_NEXT(req, r_chain);
	if (nextreq != NULL) {
		nfs_reqbusy(nextreq);
	}
	/* unbusy and signal. */
	req->r_lflags &= ~RL_BUSY;
	if (req->r_lflags & RL_WAITING) {
		/* someone (nfs_reqdequeue) is waiting for this request to unbusy */
		req->r_lflags &= ~RL_WAITING;
		wakeup(&req->r_lflags);
	}
	return nextreq;
}
5207 | ||
/*
 * NFS request queue timer routine
 *
 * Scan the NFS request queue for any requests that have timed out.
 *
 * Alert the system of unresponsive servers.
 * Mark expired requests on soft mounts as terminated.
 * For UDP, mark/signal requests for retransmission.
 *
 * Locking: walks nfs_reqq under nfs_request_mutex, keeping the current
 * request RL_BUSY (see nfs_reqbusy()/nfs_reqnext()).  Per-request state is
 * examined under r_mtx, and mount state under nm_lock.  Mounts that need a
 * socket poke are collected on a local queue (with a mount reference) and
 * poked after all locks are dropped.
 */
void
nfs_request_timer(__unused void *param0, __unused void *param1)
{
	struct nfsreq *req;
	struct nfsmount *nmp;
	int timeo, maxtime, finish_asyncio, error;
	struct timeval now;
	TAILQ_HEAD(nfs_mount_pokeq, nfsmount) nfs_mount_poke_queue;
	TAILQ_INIT(&nfs_mount_poke_queue);

restart:
	lck_mtx_lock(nfs_request_mutex);
	req = TAILQ_FIRST(&nfs_reqq);
	if (req == NULL) {      /* no requests - turn timer off */
		nfs_request_timer_on = 0;
		lck_mtx_unlock(nfs_request_mutex);
		return;
	}

	nfs_reqbusy(req);

	microuptime(&now);
	for (; req != NULL; req = nfs_reqnext(req)) {
		nmp = req->r_nmp;
		if (nmp == NULL) {
			NFS_SOCK_DBG("Found a request with out a mount!\n");
			continue;
		}
		if (req->r_error || req->r_nmrep.nmc_mhead) {
			/* already failed or already has a reply - nothing to time out */
			continue;
		}
		if ((error = nfs_sigintr(nmp, req, req->r_thread, 0))) {
			if (req->r_callback.rcb_func != NULL) {
				/* async I/O RPC needs to be finished */
				lck_mtx_lock(&req->r_mtx);
				req->r_error = error;
				finish_asyncio = !(req->r_flags & R_WAITSENT);
				wakeup(req);
				lck_mtx_unlock(&req->r_mtx);
				if (finish_asyncio) {
					nfs_asyncio_finish(req);
				}
			}
			continue;
		}

		lck_mtx_lock(&req->r_mtx);

		/* time to post a "server not responding" console message? */
		if (nmp->nm_tprintf_initial_delay &&
		    ((req->r_rexmit > 2) || (req->r_flags & R_RESENDERR)) &&
		    ((req->r_lastmsg + nmp->nm_tprintf_delay) < now.tv_sec)) {
			req->r_lastmsg = now.tv_sec;
			nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
			    "not responding", 1);
			req->r_flags |= R_TPRINTFMSG;
			lck_mtx_lock(&nmp->nm_lock);
			if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
				lck_mtx_unlock(&nmp->nm_lock);
				/* we're not yet completely mounted and */
				/* we can't complete an RPC, so we fail */
				OSAddAtomic64(1, &nfsstats.rpctimeouts);
				nfs_softterm(req);
				finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
				wakeup(req);
				lck_mtx_unlock(&req->r_mtx);
				if (finish_asyncio) {
					nfs_asyncio_finish(req);
				}
				continue;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}

		/*
		 * Put a reasonable limit on the maximum timeout,
		 * and reduce that limit when soft mounts get timeouts or are in reconnect.
		 */
		if (!(NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && !nfs_can_squish(nmp)) {
			maxtime = NFS_MAXTIMEO;
		} else if ((req->r_flags & (R_SETUP | R_RECOVER)) ||
		    ((nmp->nm_reconnect_start <= 0) || ((now.tv_sec - nmp->nm_reconnect_start) < 8))) {
			maxtime = (NFS_MAXTIMEO / (nmp->nm_timeouts + 1)) / 2;
		} else {
			maxtime = NFS_MINTIMEO / 4;
		}

		/*
		 * Check for request timeout.
		 * (r_rtt < 0 means the request isn't currently being timed)
		 */
		if (req->r_rtt >= 0) {
			req->r_rtt++;
			lck_mtx_lock(&nmp->nm_lock);
			if (req->r_flags & R_RESENDERR) {
				/* with resend errors, retry every few seconds */
				timeo = 4 * hz;
			} else {
				if (req->r_procnum == NFSPROC_NULL && req->r_gss_ctx != NULL) {
					timeo = NFS_MINIDEMTIMEO; // gss context setup
				} else if (NMFLAG(nmp, DUMBTIMER)) {
					timeo = nmp->nm_timeo;
				} else {
					timeo = NFS_RTO(nmp, proct[req->r_procnum]);
				}

				/* ensure 62.5 ms floor */
				while (16 * timeo < hz) {
					timeo *= 2;
				}
				if (nmp->nm_timeouts > 0) {
					/* exponential backoff based on prior timeouts */
					timeo *= nfs_backoff[nmp->nm_timeouts - 1];
				}
			}
			/* limit timeout to max */
			if (timeo > maxtime) {
				timeo = maxtime;
			}
			if (req->r_rtt <= timeo) {
				NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req->r_rtt, timeo);
				lck_mtx_unlock(&nmp->nm_lock);
				lck_mtx_unlock(&req->r_mtx);
				continue;
			}
			/* The request has timed out */
			NFS_SOCK_DBG("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
			    req->r_procnum, proct[req->r_procnum],
			    req->r_xid, req->r_rtt, timeo, nmp->nm_timeouts,
			    (now.tv_sec - req->r_start) * NFS_HZ, maxtime);
			if (nmp->nm_timeouts < 8) {
				nmp->nm_timeouts++;
			}
			if (nfs_mount_check_dead_timeout(nmp)) {
				/* Unbusy this request */
				req->r_lflags &= ~RL_BUSY;
				if (req->r_lflags & RL_WAITING) {
					req->r_lflags &= ~RL_WAITING;
					wakeup(&req->r_lflags);
				}
				lck_mtx_unlock(&req->r_mtx);

				/* No need to poke this mount */
				if (nmp->nm_sockflags & NMSOCK_POKE) {
					nmp->nm_sockflags &= ~NMSOCK_POKE;
					TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
				}
				/* Release our lock state, so we can become a zombie */
				lck_mtx_unlock(nfs_request_mutex);

				/*
				 * Note nfs_mount_make zombie(nmp) must be
				 * called with nm_lock held. After doing some
				 * work we release nm_lock in
				 * nfs_make_mount_zombie with out acquiring any
				 * other locks. (Later, in nfs_mount_zombie we
				 * will acquire nfs_request_mutex, r_mtx,
				 * nm_lock in that order). So we should not be
				 * introducing deadlock here. We take a reference
				 * on the mount so that its still there when we
				 * release the lock.
				 */
				nmp->nm_ref++;
				nfs_mount_make_zombie(nmp);
				lck_mtx_unlock(&nmp->nm_lock);
				nfs_mount_rele(nmp);

				/*
				 * All the request for this mount have now been
				 * removed from the request queue. Restart to
				 * process the remaining mounts
				 */
				goto restart;
			}

			/* if it's been a few seconds, try poking the socket */
			if ((nmp->nm_sotype == SOCK_STREAM) &&
			    ((now.tv_sec - req->r_start) >= 3) &&
			    !(nmp->nm_sockflags & (NMSOCK_POKE | NMSOCK_UNMOUNT)) &&
			    (nmp->nm_sockflags & NMSOCK_READY)) {
				nmp->nm_sockflags |= NMSOCK_POKE;
				/*
				 * We take a ref on the mount so that we know the mount will still be there
				 * when we process the nfs_mount_poke_queue. An unmount request will block
				 * in nfs_mount_drain_and_cleanup until after the poke is finished. We release
				 * the reference after calling nfs_sock_poke below;
				 */
				nmp->nm_ref++;
				TAILQ_INSERT_TAIL(&nfs_mount_poke_queue, nmp, nm_pokeq);
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}

		/* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
		if ((NMFLAG(nmp, SOFT) || (req->r_flags & (R_SETUP | R_RECOVER | R_SOFT))) &&
		    ((req->r_rexmit >= req->r_retry) || /* too many */
		    ((now.tv_sec - req->r_start) * NFS_HZ > maxtime))) { /* too long */
			OSAddAtomic64(1, &nfsstats.rpctimeouts);
			lck_mtx_lock(&nmp->nm_lock);
			if (!(nmp->nm_state & NFSSTA_TIMEO)) {
				lck_mtx_unlock(&nmp->nm_lock);
				/* make sure we note the unresponsive server */
				/* (maxtime may be less than tprintf delay) */
				nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
				    "not responding", 1);
				req->r_lastmsg = now.tv_sec;
				req->r_flags |= R_TPRINTFMSG;
			} else {
				lck_mtx_unlock(&nmp->nm_lock);
			}
			if (req->r_flags & R_NOINTR) {
				/* don't terminate nointr requests on timeout */
				lck_mtx_unlock(&req->r_mtx);
				continue;
			}
			NFS_SOCK_DBG("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt,
			    now.tv_sec - req->r_start);
			nfs_softterm(req);
			finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
			wakeup(req);
			lck_mtx_unlock(&req->r_mtx);
			if (finish_asyncio) {
				nfs_asyncio_finish(req);
			}
			continue;
		}

		/* for TCP, only resend if explicitly requested */
		if ((nmp->nm_sotype == SOCK_STREAM) && !(req->r_flags & R_MUSTRESEND)) {
			if (++req->r_rexmit > NFS_MAXREXMIT) {
				req->r_rexmit = NFS_MAXREXMIT;
			}
			req->r_rtt = 0;
			lck_mtx_unlock(&req->r_mtx);
			continue;
		}

		/*
		 * The request needs to be (re)sent.  Kick the requester to resend it.
		 * (unless it's already marked as needing a resend)
		 */
		if ((req->r_flags & R_MUSTRESEND) && (req->r_rtt == -1)) {
			lck_mtx_unlock(&req->r_mtx);
			continue;
		}
		NFS_SOCK_DBG("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
		    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = -1;
		wakeup(req);
		/* hand plain async requests (not in an iod, not being waited on) to an iod */
		if ((req->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
			nfs_asyncio_resend(req);
		}
		lck_mtx_unlock(&req->r_mtx);
	}

	lck_mtx_unlock(nfs_request_mutex);

	/* poke any sockets */
	while ((nmp = TAILQ_FIRST(&nfs_mount_poke_queue))) {
		TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
		nfs_sock_poke(nmp);
		nfs_mount_rele(nmp);
	}

	nfs_interval_timer_start(nfs_request_timer_call, NFS_REQUESTDELAY);
}
5482 | ||
5483 | /* | |
5484 | * check a thread's proc for the "noremotehang" flag. | |
5485 | */ | |
5486 | int | |
5487 | nfs_noremotehang(thread_t thd) | |
5488 | { | |
5489 | proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL; | |
5490 | return p && proc_noremotehang(p); | |
5491 | } | |
5492 | ||
/*
 * Test for a termination condition pending on the process.
 * This is used to determine if we need to bail on a mount.
 * ETIMEDOUT is returned if there has been a soft timeout.
 * EINTR is returned if there is a signal pending that is not being ignored
 * and the mount is interruptable, or if we are a thread that is in the process
 * of cancellation (also SIGKILL posted).
 *
 * nmp       - the mount (ENXIO if NULL or marked dead)
 * req       - optional request being waited on (may be NULL)
 * thd       - thread to check for pending signals (NULL skips the check)
 * nmplocked - nonzero if the caller already holds nm_lock
 */
extern int sigprop[NSIG + 1];
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocked)
{
	proc_t p;
	int error = 0;

	if (!nmp) {
		return ENXIO;
	}

	if (req && (req->r_flags & R_SOFTTERM)) {
		return ETIMEDOUT; /* request has been terminated. */
	}
	if (req && (req->r_flags & R_NOINTR)) {
		thd = NULL; /* don't check for signal on R_NOINTR */
	}
	if (!nmplocked) {
		lck_mtx_lock(&nmp->nm_lock);
	}
	if (nmp->nm_state & NFSSTA_FORCE) {
		/* If a force unmount is in progress then fail. */
		error = EIO;
	} else if (vfs_isforce(nmp->nm_mountp)) {
		/* Someone is unmounting us, go soft and mark it. */
		/* (first caller to notice flips the mount soft and sets NFSSTA_FORCE) */
		NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_SOFT);
		nmp->nm_state |= NFSSTA_FORCE;
	}

	/* Check if the mount is marked dead. */
	if (!error && (nmp->nm_state & NFSSTA_DEAD)) {
		error = ENXIO;
	}

	/*
	 * If the mount is hung and we've requested not to hang
	 * on remote filesystems, then bail now.
	 * (never for kernel-internal work - only user processes opt in)
	 */
	if (current_proc() != kernproc &&
	    !error && (nmp->nm_state & NFSSTA_TIMEO) && nfs_noremotehang(thd)) {
		error = EIO;
	}

	if (!nmplocked) {
		lck_mtx_unlock(&nmp->nm_lock);
	}
	if (error) {
		return error;
	}

	/* may not have a thread for async I/O */
	if (thd == NULL || current_proc() == kernproc) {
		return 0;
	}

	/*
	 * Check if the process is aborted, but don't interrupt if we
	 * were killed by a signal and this is the exiting thread which
	 * is attempting to dump core.
	 */
	if (((p = current_proc()) != kernproc) && current_thread_aborted() &&
	    (!(p->p_acflag & AXSIG) || (p->exit_thread != current_thread()) ||
	    (p->p_sigacts == NULL) ||
	    (p->p_sigacts->ps_sig < 1) || (p->p_sigacts->ps_sig > NSIG) ||
	    !(sigprop[p->p_sigacts->ps_sig] & SA_CORE))) {
		return EINTR;
	}

	/* mask off thread and process blocked signals. */
	/* only interruptible (NFS_MFLAG_INTR) mounts honor pending signals */
	if (NMFLAG(nmp, INTR) && ((p = get_bsdthreadtask_info(thd))) &&
	    proc_pendingsignals(p, NFSINT_SIGMASK)) {
		return EINTR;
	}
	return 0;
}
5576 | ||
/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 *
 * Returns 0 with NFSSTA_SNDLOCK held, or an error from nfs_sigintr()
 * (the lock is NOT held on error).
 */
int
nfs_sndlock(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	int *statep;
	int error = 0, slpflag = 0;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	lck_mtx_lock(&nmp->nm_lock);
	statep = &nmp->nm_state;

	/* interruptible mounts allow the wait to be broken by a signal */
	if (NMFLAG(nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
		slpflag = PCATCH;
	}
	while (*statep & NFSSTA_SNDLOCK) {
		if ((error = nfs_sigintr(nmp, req, req->r_thread, 1))) {
			break;
		}
		*statep |= NFSSTA_WANTSND;
		if (nfs_noremotehang(req->r_thread)) {
			/* bounded sleep so "noremotehang" threads can bail out */
			ts.tv_sec = 1;
		}
		msleep(statep, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsndlck", &ts);
		if (slpflag == PCATCH) {
			/* after one catchable sleep, switch to periodic uncatchable sleeps */
			slpflag = 0;
			ts.tv_sec = 2;
		}
	}
	if (!error) {
		*statep |= NFSSTA_SNDLOCK;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	return error;
}
5621 | ||
5622 | /* | |
5623 | * Unlock the stream socket for others. | |
5624 | */ | |
5625 | void | |
5626 | nfs_sndunlock(struct nfsreq *req) | |
5627 | { | |
5628 | struct nfsmount *nmp = req->r_nmp; | |
5629 | int *statep, wake = 0; | |
5630 | ||
5631 | if (!nmp) { | |
5632 | return; | |
5633 | } | |
5634 | lck_mtx_lock(&nmp->nm_lock); | |
5635 | statep = &nmp->nm_state; | |
5636 | if ((*statep & NFSSTA_SNDLOCK) == 0) { | |
5637 | panic("nfs sndunlock"); | |
5638 | } | |
5639 | *statep &= ~(NFSSTA_SNDLOCK | NFSSTA_SENDING); | |
5640 | if (*statep & NFSSTA_WANTSND) { | |
5641 | *statep &= ~NFSSTA_WANTSND; | |
5642 | wake = 1; | |
5643 | } | |
5644 | lck_mtx_unlock(&nmp->nm_lock); | |
5645 | if (wake) { | |
5646 | wakeup(statep); | |
5647 | } | |
5648 | } | |
5649 | ||
/*
 * Perform a simple one-shot RPC over an auxiliary socket
 * (used for portmap/rpcbind and similar out-of-band requests).
 *
 * nmp      - mount, used only for nfs_sigintr() interruption checks
 * thd      - thread for interruption checks (ignored on the first try)
 * saddr    - destination address; also used when creating a socket
 * so       - socket to use, or NULL to create a temporary one
 * sotype   - SOCK_STREAM or SOCK_DGRAM
 * mreq     - RPC request mbuf chain (always consumed, even on error)
 * xid      - expected transaction ID of the reply
 * bindresv - nonzero to bind a newly created socket to a reserved (low) port
 * timeo    - number of 1-second receive attempts before giving up
 * nmrep    - on success, the reply chain positioned just past the RPC header
 *
 * Returns 0 on an RPC_SUCCESS reply, or an errno mapped from the RPC
 * accept/reject status (or a socket/interruption error).
 */
int
nfs_aux_request(
	struct nfsmount *nmp,
	thread_t thd,
	struct sockaddr *saddr,
	socket_t so,
	int sotype,
	mbuf_t mreq,
	uint32_t xid,
	int bindresv,
	int timeo,
	struct nfsm_chain *nmrep)
{
	int error = 0, on = 1, try, sendat = 2, soproto, recv, optlen, restoreto = 0;
	socket_t newso = NULL;
	struct sockaddr_storage ss;
	struct timeval orig_rcvto, orig_sndto, tv = { .tv_sec = 1, .tv_usec = 0 };
	mbuf_t m, mrep = NULL;
	struct msghdr msg;
	uint32_t rxid = 0, reply = 0, reply_status, rejected_status;
	uint32_t verf_type, verf_len, accepted_status;
	size_t readlen, sentlen;
	struct nfs_rpc_record_state nrrs;

	if (!so) {
		/* create socket and set options */
		if (saddr->sa_family == AF_LOCAL) {
			soproto = 0;
		} else {
			soproto = (sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP;
		}
		if ((error = sock_socket(saddr->sa_family, sotype, soproto, NULL, NULL, &newso))) {
			goto nfsmout;
		}

		if (bindresv && saddr->sa_family != AF_LOCAL) {
			/* bind to a reserved port (required by some servers for authentication) */
			int level = (saddr->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
			int optname = (saddr->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
			int portrange = IP_PORTRANGE_LOW;
			error = sock_setsockopt(newso, level, optname, &portrange, sizeof(portrange));
			nfsmout_if(error);
			ss.ss_len = saddr->sa_len;
			ss.ss_family = saddr->sa_family;
			if (ss.ss_family == AF_INET) {
				((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
				((struct sockaddr_in*)&ss)->sin_port = htons(0);
			} else if (ss.ss_family == AF_INET6) {
				((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
				((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
			} else {
				error = EINVAL;
			}
			if (!error) {
				error = sock_bind(newso, (struct sockaddr *)&ss);
			}
			nfsmout_if(error);
		}

		if (sotype == SOCK_STREAM) {
#			define NFS_AUX_CONNECTION_TIMEOUT 4     /* 4 second timeout for connections */
			int count = 0;

			error = sock_connect(newso, saddr, MSG_DONTWAIT);
			if (error == EINPROGRESS) {
				/* non-blocking connect started; wait for completion below */
				error = 0;
			}
			nfsmout_if(error);

			while ((error = sock_connectwait(newso, &tv)) == EINPROGRESS) {
				/* After NFS_AUX_CONNECTION_TIMEOUT bail */
				if (++count >= NFS_AUX_CONNECTION_TIMEOUT) {
					error = ETIMEDOUT;
					break;
				}
			}
			nfsmout_if(error);
		}
		/* 1-second send/receive timeouts drive the retry loop below */
		if (((error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))) ||
		    ((error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)))) ||
		    ((error = sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on))))) {
			goto nfsmout;
		}
		so = newso;
	} else {
		/* make sure socket is using a one second timeout in this function */
		/* (save the caller's timeouts so they can be restored on the way out) */
		optlen = sizeof(orig_rcvto);
		error = sock_getsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, &optlen);
		if (!error) {
			optlen = sizeof(orig_sndto);
			error = sock_getsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, &optlen);
		}
		if (!error) {
			sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
			sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
			restoreto = 1;
		}
	}

	if (sotype == SOCK_STREAM) {
		sendat = 0; /* we only resend the request for UDP */
		nfs_rpc_record_state_init(&nrrs);
	}

	for (try = 0; try < timeo; try++) {
		/* don't check for interruption on the very first try */
		if ((error = nfs_sigintr(nmp, NULL, !try ? NULL : thd, 0))) {
			break;
		}
		if (!try || (try == sendat)) {
			/* send the request (resending periodically for UDP) */
			if ((error = mbuf_copym(mreq, 0, MBUF_COPYALL, MBUF_WAITOK, &m))) {
				goto nfsmout;
			}
			bzero(&msg, sizeof(msg));
			if ((sotype == SOCK_DGRAM) && !sock_isconnected(so)) {
				msg.msg_name = saddr;
				msg.msg_namelen = saddr->sa_len;
			}
			if ((error = sock_sendmbuf(so, &msg, m, 0, &sentlen))) {
				goto nfsmout;
			}
			/* back off the next UDP resend (2, 4, 8, ... capped at 30 tries) */
			sendat *= 2;
			if (sendat > 30) {
				sendat = 30;
			}
		}
		/* wait for the response */
		if (sotype == SOCK_STREAM) {
			/* try to read (more of) record */
			error = nfs_rpc_record_read(so, &nrrs, 0, &recv, &mrep);
			/* if we don't have the whole record yet, we'll keep trying */
		} else {
			readlen = 1 << 18;
			bzero(&msg, sizeof(msg));
			error = sock_receivembuf(so, &msg, &mrep, 0, &readlen);
		}
		if (error == EWOULDBLOCK) {
			/* receive timed out - loop around for another try */
			continue;
		}
		nfsmout_if(error);
		/* parse the RPC reply header (RFC 5531 reply message layout) */
		nfsm_chain_dissect_init(error, nmrep, mrep);
		nfsm_chain_get_32(error, nmrep, rxid);
		nfsm_chain_get_32(error, nmrep, reply);
		nfsmout_if(error);
		if ((rxid != xid) || (reply != RPC_REPLY)) {
			error = EBADRPC;
		}
		nfsm_chain_get_32(error, nmrep, reply_status);
		nfsmout_if(error);
		if (reply_status == RPC_MSGDENIED) {
			nfsm_chain_get_32(error, nmrep, rejected_status);
			nfsmout_if(error);
			error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
			goto nfsmout;
		}
		nfsm_chain_get_32(error, nmrep, verf_type); /* verifier flavor */
		nfsm_chain_get_32(error, nmrep, verf_len); /* verifier length */
		nfsmout_if(error);
		if (verf_len) {
			/* skip over the verifier body (XDR-padded) */
			nfsm_chain_adv(error, nmrep, nfsm_rndup(verf_len));
		}
		nfsm_chain_get_32(error, nmrep, accepted_status);
		nfsmout_if(error);
		/* map the RPC accepted status to an errno */
		switch (accepted_status) {
		case RPC_SUCCESS:
			error = 0;
			break;
		case RPC_PROGUNAVAIL:
			error = EPROGUNAVAIL;
			break;
		case RPC_PROGMISMATCH:
			error = EPROGMISMATCH;
			break;
		case RPC_PROCUNAVAIL:
			error = EPROCUNAVAIL;
			break;
		case RPC_GARBAGE:
			error = EBADRPC;
			break;
		case RPC_SYSTEM_ERR:
		default:
			error = EIO;
			break;
		}
		break;
	}
nfsmout:
	if (restoreto) {
		/* restore the caller's original socket timeouts */
		sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, sizeof(tv));
		sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, sizeof(tv));
	}
	if (newso) {
		sock_shutdown(newso, SHUT_RDWR);
		sock_close(newso);
	}
	/* the request mbuf chain is always consumed */
	mbuf_freem(mreq);
	return error;
}
5848 | ||
5849 | int | |
5850 | nfs_portmap_lookup( | |
5851 | struct nfsmount *nmp, | |
5852 | vfs_context_t ctx, | |
5853 | struct sockaddr *sa, | |
5854 | socket_t so, | |
5855 | uint32_t protocol, | |
5856 | uint32_t vers, | |
5857 | uint32_t stype, | |
5858 | int timeo) | |
5859 | { | |
5860 | thread_t thd = vfs_context_thread(ctx); | |
5861 | kauth_cred_t cred = vfs_context_ucred(ctx); | |
5862 | struct sockaddr_storage ss; | |
5863 | struct sockaddr *saddr = (struct sockaddr*)&ss; | |
5864 | static struct sockaddr_un rpcbind_cots = { | |
5865 | sizeof(struct sockaddr_un), | |
5866 | AF_LOCAL, | |
5867 | RPCB_TICOTSORD_PATH | |
5868 | }; | |
5869 | static struct sockaddr_un rpcbind_clts = { | |
5870 | sizeof(struct sockaddr_un), | |
5871 | AF_LOCAL, | |
5872 | RPCB_TICLTS_PATH | |
5873 | }; | |
5874 | struct nfsm_chain nmreq, nmrep; | |
5875 | mbuf_t mreq; | |
5876 | int error = 0, ip, pmprog, pmvers, pmproc; | |
5877 | uint32_t ualen = 0; | |
5878 | uint32_t port; | |
5879 | uint64_t xid = 0; | |
5880 | char uaddr[MAX_IPv6_STR_LEN + 16]; | |
5881 | ||
5882 | bcopy(sa, saddr, min(sizeof(ss), sa->sa_len)); | |
5883 | if (saddr->sa_family == AF_INET) { | |
5884 | ip = 4; | |
5885 | pmprog = PMAPPROG; | |
5886 | pmvers = PMAPVERS; | |
5887 | pmproc = PMAPPROC_GETPORT; | |
5888 | } else if (saddr->sa_family == AF_INET6) { | |
5889 | ip = 6; | |
5890 | pmprog = RPCBPROG; | |
5891 | pmvers = RPCBVERS4; | |
5892 | pmproc = RPCBPROC_GETVERSADDR; | |
5893 | } else if (saddr->sa_family == AF_LOCAL) { | |
5894 | ip = 0; | |
5895 | pmprog = RPCBPROG; | |
5896 | pmvers = RPCBVERS4; | |
5897 | pmproc = RPCBPROC_GETVERSADDR; | |
5898 | NFS_SOCK_DBG("%s\n", ((struct sockaddr_un*)sa)->sun_path); | |
5899 | saddr = (struct sockaddr*)((stype == SOCK_STREAM) ? &rpcbind_cots : &rpcbind_clts); | |
5900 | } else { | |
5901 | return EINVAL; | |
5902 | } | |
5903 | nfsm_chain_null(&nmreq); | |
5904 | nfsm_chain_null(&nmrep); | |
5905 | ||
5906 | tryagain: | |
5907 | /* send portmapper request to get port/uaddr */ | |
5908 | if (ip == 4) { | |
5909 | ((struct sockaddr_in*)saddr)->sin_port = htons(PMAPPORT); | |
5910 | } else if (ip == 6) { | |
5911 | ((struct sockaddr_in6*)saddr)->sin6_port = htons(PMAPPORT); | |
5912 | } | |
5913 | nfsm_chain_build_alloc_init(error, &nmreq, 8 * NFSX_UNSIGNED); | |
5914 | nfsm_chain_add_32(error, &nmreq, protocol); | |
5915 | nfsm_chain_add_32(error, &nmreq, vers); | |
5916 | if (ip == 4) { | |
5917 | nfsm_chain_add_32(error, &nmreq, stype == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP); | |
5918 | nfsm_chain_add_32(error, &nmreq, 0); | |
5919 | } else { | |
5920 | if (stype == SOCK_STREAM) { | |
5921 | if (ip == 6) { | |
5922 | nfsm_chain_add_string(error, &nmreq, "tcp6", 4); | |
5923 | } else { | |
5924 | nfsm_chain_add_string(error, &nmreq, "ticotsord", 9); | |
5925 | } | |
5926 | } else { | |
5927 | if (ip == 6) { | |
5928 | nfsm_chain_add_string(error, &nmreq, "udp6", 4); | |
5929 | } else { | |
5930 | nfsm_chain_add_string(error, &nmreq, "ticlts", 6); | |
5931 | } | |
5932 | } | |
5933 | nfsm_chain_add_string(error, &nmreq, "", 0); /* uaddr */ | |
5934 | nfsm_chain_add_string(error, &nmreq, "", 0); /* owner */ | |
5935 | } | |
5936 | nfsm_chain_build_done(error, &nmreq); | |
5937 | nfsmout_if(error); | |
5938 | error = nfsm_rpchead2(nmp, stype, pmprog, pmvers, pmproc, | |
5939 | RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq); | |
5940 | nfsmout_if(error); | |
5941 | nmreq.nmc_mhead = NULL; | |
5942 | ||
5943 | NFS_SOCK_DUMP_MBUF("nfs_portmap_loockup request", mreq); | |
5944 | error = nfs_aux_request(nmp, thd, saddr, so, | |
5945 | stype, mreq, R_XID32(xid), 0, timeo, &nmrep); | |
5946 | NFS_SOCK_DUMP_MBUF("nfs_portmap_lookup reply", nmrep.nmc_mhead); | |
5947 | NFS_SOCK_DBG("rpcbind request returned %d for program %u vers %u: %s\n", error, protocol, vers, | |
5948 | (saddr->sa_family == AF_LOCAL) ? ((struct sockaddr_un *)saddr)->sun_path : | |
5949 | (saddr->sa_family == AF_INET6) ? "INET6 socket" : "INET socket"); | |
5950 | ||
5951 | /* grab port from portmap response */ | |
5952 | if (ip == 4) { | |
5953 | nfsm_chain_get_32(error, &nmrep, port); | |
5954 | if (!error) { | |
5955 | ((struct sockaddr_in*)sa)->sin_port = htons(port); | |
5956 | } | |
5957 | } else { | |
5958 | /* get uaddr string and convert to sockaddr */ | |
5959 | nfsm_chain_get_32(error, &nmrep, ualen); | |
5960 | if (!error) { | |
5961 | if (ualen > (sizeof(uaddr) - 1)) { | |
5962 | error = EIO; | |
5963 | } | |
5964 | if (ualen < 1) { | |
5965 | /* program is not available, just return a zero port */ | |
5966 | bcopy(sa, saddr, min(sizeof(ss), sa->sa_len)); | |
5967 | if (ip == 6) { | |
5968 | ((struct sockaddr_in6*)saddr)->sin6_port = htons(0); | |
5969 | } else { | |
5970 | ((struct sockaddr_un*)saddr)->sun_path[0] = '\0'; | |
5971 | } | |
5972 | NFS_SOCK_DBG("Program %u version %u unavailable", protocol, vers); | |
5973 | } else { | |
5974 | nfsm_chain_get_opaque(error, &nmrep, ualen, uaddr); | |
5975 | NFS_SOCK_DBG("Got uaddr %s\n", uaddr); | |
5976 | if (!error) { | |
5977 | uaddr[ualen] = '\0'; | |
5978 | if (!nfs_uaddr2sockaddr(uaddr, saddr)) { | |
5979 | error = EIO; | |
5980 | } | |
5981 | } | |
5982 | } | |
5983 | } | |
5984 | if ((error == EPROGMISMATCH) || (error == EPROCUNAVAIL) || (error == EIO) || (error == EBADRPC)) { | |
5985 | /* remote doesn't support rpcbind version or proc (or we couldn't parse uaddr) */ | |
5986 | if (pmvers == RPCBVERS4) { | |
5987 | /* fall back to v3 and GETADDR */ | |
5988 | pmvers = RPCBVERS3; | |
5989 | pmproc = RPCBPROC_GETADDR; | |
5990 | nfsm_chain_cleanup(&nmreq); | |
5991 | nfsm_chain_cleanup(&nmrep); | |
5992 | bcopy(sa, saddr, min(sizeof(ss), sa->sa_len)); | |
5993 | xid = 0; | |
5994 | error = 0; | |
5995 | goto tryagain; | |
5996 | } | |
5997 | } | |
5998 | if (!error) { | |
5999 | bcopy(saddr, sa, min(saddr->sa_len, sa->sa_len)); | |
6000 | } | |
6001 | } | |
6002 | nfsmout: | |
6003 | nfsm_chain_cleanup(&nmreq); | |
6004 | nfsm_chain_cleanup(&nmrep); | |
6005 | NFS_SOCK_DBG("Returned %d\n", error); | |
6006 | ||
6007 | return error; | |
6008 | } | |
6009 | ||
6010 | int | |
6011 | nfs_msg(thread_t thd, | |
6012 | const char *server, | |
6013 | const char *msg, | |
6014 | int error) | |
6015 | { | |
6016 | proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL; | |
6017 | tpr_t tpr; | |
6018 | ||
6019 | if (p) { | |
6020 | tpr = tprintf_open(p); | |
6021 | } else { | |
6022 | tpr = NULL; | |
6023 | } | |
6024 | if (error) { | |
6025 | tprintf(tpr, "nfs server %s: %s, error %d\n", server, msg, error); | |
6026 | } else { | |
6027 | tprintf(tpr, "nfs server %s: %s\n", server, msg); | |
6028 | } | |
6029 | tprintf_close(tpr); | |
6030 | return 0; | |
6031 | } | |
6032 | ||
/*
 * "Squishy" mount controls: nfs_squishy_flags selects which mounts may be
 * forcibly unmounted with little risk of harm when they become unresponsive.
 * See nfs_can_squish() and nfs_is_squishy() below.  Timeouts are in seconds.
 */
#define NFS_SQUISH_MOBILE_ONLY          0x0001          /* Squish mounts only on mobile machines */
#define NFS_SQUISH_AUTOMOUNTED_ONLY     0x0002          /* Squish mounts only if they are automounted */
#define NFS_SQUISH_SOFT                 0x0004          /* Treat all soft mounts as though they were on a mobile machine */
#define NFS_SQUISH_QUICK                0x0008          /* Try to squish mounts more quickly. */
#define NFS_SQUISH_SHUTDOWN             0x1000          /* Squish all mounts on shutdown. Currently not implemented */

/* settable via sysctl; defaults restrict squishing to automounted mounts on mobile machines */
uint32_t nfs_squishy_flags = NFS_SQUISH_MOBILE_ONLY | NFS_SQUISH_AUTOMOUNTED_ONLY | NFS_SQUISH_QUICK;
int32_t nfs_is_mobile;

#define NFS_SQUISHY_DEADTIMEOUT         8       /* Dead time out for squishy mounts */
#define NFS_SQUISHY_QUICKTIMEOUT        4       /* Quicker dead time out when nfs_squish_flags NFS_SQUISH_QUICK bit is set */
6044 | ||
6045 | /* | |
6046 | * Could this mount be squished? | |
6047 | */ | |
6048 | int | |
6049 | nfs_can_squish(struct nfsmount *nmp) | |
6050 | { | |
6051 | uint64_t flags = vfs_flags(nmp->nm_mountp); | |
6052 | int softsquish = ((nfs_squishy_flags & NFS_SQUISH_SOFT) & NMFLAG(nmp, SOFT)); | |
6053 | ||
6054 | if (!softsquish && (nfs_squishy_flags & NFS_SQUISH_MOBILE_ONLY) && nfs_is_mobile == 0) { | |
6055 | return 0; | |
6056 | } | |
6057 | ||
6058 | if ((nfs_squishy_flags & NFS_SQUISH_AUTOMOUNTED_ONLY) && (flags & MNT_AUTOMOUNTED) == 0) { | |
6059 | return 0; | |
6060 | } | |
6061 | ||
6062 | return 1; | |
6063 | } | |
6064 | ||
6065 | /* | |
6066 | * NFS mounts default to "rw,hard" - but frequently on mobile clients | |
6067 | * the mount may become "not responding". It's desirable to be able | |
6068 | * to unmount these dead mounts, but only if there is no risk of | |
6069 | * losing data or crashing applications. A "squishy" NFS mount is one | |
6070 | * that can be force unmounted with little risk of harm. | |
6071 | * | |
6072 | * nfs_is_squishy checks if a mount is in a squishy state. A mount is | |
6073 | * in a squishy state iff it is allowed to be squishy and there are no | |
6074 | * dirty pages and there are no mmapped files and there are no files | |
 * open for write. Whether mounts are allowed to be squishy is
 * controlled by the nfs_squishy_flags settings and the machine's
 * mobility state. These flags can be set by sysctls.
6078 | * | |
6079 | * If nfs_is_squishy determines that we are in a squishy state we will | |
6080 | * update the current dead timeout to at least NFS_SQUISHY_DEADTIMEOUT | |
6081 | * (or NFS_SQUISHY_QUICKTIMEOUT if NFS_SQUISH_QUICK is set) (see | |
6082 | * above) or 1/8th of the mount's nm_deadtimeout value, otherwise we just | |
6083 | * update the current dead timeout with the mount's nm_deadtimeout | |
6084 | * value set at mount time. | |
6085 | * | |
6086 | * Assumes that nm_lock is held. | |
6087 | * | |
 * Note this routine is racy, but setting the dead timeout only
 * has an effect when we're in trouble and are likely to stay that
 * way. Since by default it's only for automounted volumes on mobile
 * machines, this is a reasonable trade-off between data integrity
 * and user experience. It can be disabled or tuned via the nfs.conf
 * file.
6094 | */ | |
6095 | ||
6096 | int | |
6097 | nfs_is_squishy(struct nfsmount *nmp) | |
6098 | { | |
6099 | mount_t mp = nmp->nm_mountp; | |
6100 | int squishy = 0; | |
6101 | int timeo = (nfs_squishy_flags & NFS_SQUISH_QUICK) ? NFS_SQUISHY_QUICKTIMEOUT : NFS_SQUISHY_DEADTIMEOUT; | |
6102 | ||
6103 | NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n", | |
6104 | vfs_statfs(mp)->f_mntfromname, nmp->nm_curdeadtimeout, nfs_is_mobile); | |
6105 | ||
6106 | if (!nfs_can_squish(nmp)) { | |
6107 | goto out; | |
6108 | } | |
6109 | ||
6110 | timeo = (nmp->nm_deadtimeout > timeo) ? max(nmp->nm_deadtimeout / 8, timeo) : timeo; | |
6111 | NFS_SOCK_DBG("nm_writers = %d nm_mappers = %d timeo = %d\n", nmp->nm_writers, nmp->nm_mappers, timeo); | |
6112 | ||
6113 | if (nmp->nm_writers == 0 && nmp->nm_mappers == 0) { | |
6114 | uint64_t flags = mp ? vfs_flags(mp) : 0; | |
6115 | squishy = 1; | |
6116 | ||
6117 | /* | |
6118 | * Walk the nfs nodes and check for dirty buffers it we're not | |
6119 | * RDONLY and we've not already been declared as squishy since | |
6120 | * this can be a bit expensive. | |
6121 | */ | |
6122 | if (!(flags & MNT_RDONLY) && !(nmp->nm_state & NFSSTA_SQUISHY)) { | |
6123 | squishy = !nfs_mount_is_dirty(mp); | |
6124 | } | |
6125 | } | |
6126 | ||
6127 | out: | |
6128 | if (squishy) { | |
6129 | nmp->nm_state |= NFSSTA_SQUISHY; | |
6130 | } else { | |
6131 | nmp->nm_state &= ~NFSSTA_SQUISHY; | |
6132 | } | |
6133 | ||
6134 | nmp->nm_curdeadtimeout = squishy ? timeo : nmp->nm_deadtimeout; | |
6135 | ||
6136 | NFS_SOCK_DBG("nm_curdeadtimeout = %d\n", nmp->nm_curdeadtimeout); | |
6137 | ||
6138 | return squishy; | |
6139 | } | |
6140 | ||
6141 | /* | |
6142 | * On a send operation, if we can't reach the server and we've got only one server to talk to | |
6143 | * and NFS_SQUISH_QUICK flag is set and we are in a squishy state then mark the mount as dead | |
6144 | * and ask to be forcibly unmounted. Return 1 if we're dead and 0 otherwise. | |
6145 | */ | |
6146 | int | |
6147 | nfs_is_dead(int error, struct nfsmount *nmp) | |
6148 | { | |
6149 | fsid_t fsid; | |
6150 | ||
6151 | lck_mtx_lock(&nmp->nm_lock); | |
6152 | if (nmp->nm_state & NFSSTA_DEAD) { | |
6153 | lck_mtx_unlock(&nmp->nm_lock); | |
6154 | return 1; | |
6155 | } | |
6156 | ||
6157 | if ((error != ENETUNREACH && error != EHOSTUNREACH && error != EADDRNOTAVAIL) || | |
6158 | !(nmp->nm_locations.nl_numlocs == 1 && nmp->nm_locations.nl_locations[0]->nl_servcount == 1)) { | |
6159 | lck_mtx_unlock(&nmp->nm_lock); | |
6160 | return 0; | |
6161 | } | |
6162 | ||
6163 | if ((nfs_squishy_flags & NFS_SQUISH_QUICK) && nfs_is_squishy(nmp)) { | |
6164 | printf("nfs_is_dead: nfs server %s: unreachable. Squished dead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname); | |
6165 | fsid = vfs_statfs(nmp->nm_mountp)->f_fsid; | |
6166 | lck_mtx_unlock(&nmp->nm_lock); | |
6167 | nfs_mount_zombie(nmp, NFSSTA_DEAD); | |
6168 | vfs_event_signal(&fsid, VQ_DEAD, 0); | |
6169 | return 1; | |
6170 | } | |
6171 | lck_mtx_unlock(&nmp->nm_lock); | |
6172 | return 0; | |
6173 | } | |
6174 | ||
6175 | /* | |
6176 | * If we've experienced timeouts and we're not really a | |
6177 | * classic hard mount, then just return cached data to | |
6178 | * the caller instead of likely hanging on an RPC. | |
6179 | */ | |
6180 | int | |
6181 | nfs_use_cache(struct nfsmount *nmp) | |
6182 | { | |
6183 | /* | |
6184 | *%%% We always let mobile users goto the cache, | |
6185 | * perhaps we should not even require them to have | |
6186 | * a timeout? | |
6187 | */ | |
6188 | int cache_ok = (nfs_is_mobile || NMFLAG(nmp, SOFT) || | |
6189 | nfs_can_squish(nmp) || nmp->nm_deadtimeout); | |
6190 | ||
6191 | int timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO; | |
6192 | ||
6193 | /* | |
6194 | * So if we have a timeout and we're not really a hard hard-mount, | |
6195 | * return 1 to not get things out of the cache. | |
6196 | */ | |
6197 | ||
6198 | return (nmp->nm_state & timeoutmask) && cache_ok; | |
6199 | } | |
6200 | ||
6201 | /* | |
6202 | * Log a message that nfs or lockd server is unresponsive. Check if we | |
6203 | * can be squished and if we can, or that our dead timeout has | |
6204 | * expired, and we're not holding state, set our mount as dead, remove | |
6205 | * our mount state and ask to be unmounted. If we are holding state | |
6206 | * we're being called from the nfs_request_timer and will soon detect | |
6207 | * that we need to unmount. | |
6208 | */ | |
void
nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *msg, int holding_state)
{
	int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
	uint32_t do_vfs_signal = 0;
	struct timeval now;

	if (nfs_mount_gone(nmp)) {
		return;
	}

	lck_mtx_lock(&nmp->nm_lock);

	timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
	if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
		timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
	}
	/* remember whether we were already in an unresponsive state on entry */
	wasunresponsive = (nmp->nm_state & timeoutmask);

	/* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
	softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));

	/* record the newly-reported timeout condition(s) in nm_state */
	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state |= NFSSTA_TIMEO;
	}
	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state |= NFSSTA_LOCKTIMEO;
	}
	if ((flags & NFSSTA_JUKEBOXTIMEO) && !(nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
		nmp->nm_state |= NFSSTA_JUKEBOXTIMEO;
	}

	unresponsive = (nmp->nm_state & timeoutmask);

	/* re-evaluate squishiness; this also updates nm_curdeadtimeout */
	nfs_is_squishy(nmp);

	if (unresponsive && (nmp->nm_curdeadtimeout > 0)) {
		microuptime(&now);
		if (!wasunresponsive) {
			/* just became unresponsive: start the dead-timeout clock */
			nmp->nm_deadto_start = now.tv_sec;
			nfs_mount_sock_thread_wake(nmp);
		} else if ((now.tv_sec - nmp->nm_deadto_start) > nmp->nm_curdeadtimeout && !holding_state) {
			/* dead timeout expired and we hold no state: mark for death */
			if (!(nmp->nm_state & NFSSTA_DEAD)) {
				printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
				    (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
			}
			do_vfs_signal = VQ_DEAD;
		}
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/*
	 * NOTE(review): nm_state is re-read here after nm_lock was dropped,
	 * so a concurrent thread could set NFSSTA_DEAD in the window —
	 * presumably benign because nfs_mount_zombie() handles the mount's
	 * state itself, but confirm.
	 */
	if (do_vfs_signal == VQ_DEAD && !(nmp->nm_state & NFSSTA_DEAD)) {
		nfs_mount_zombie(nmp, NFSSTA_DEAD);
	} else if (softnobrowse || wasunresponsive || !unresponsive) {
		/* only signal VQ_NOTRESP on a fresh transition to unresponsive */
		do_vfs_signal = 0;
	} else {
		do_vfs_signal = VQ_NOTRESP;
	}
	if (do_vfs_signal) {
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, do_vfs_signal, 0);
	}

	nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, error);
}
6273 | ||
6274 | void | |
6275 | nfs_up(struct nfsmount *nmp, thread_t thd, int flags, const char *msg) | |
6276 | { | |
6277 | int timeoutmask, wasunresponsive, unresponsive, softnobrowse; | |
6278 | int do_vfs_signal; | |
6279 | ||
6280 | if (nfs_mount_gone(nmp)) { | |
6281 | return; | |
6282 | } | |
6283 | ||
6284 | if (msg) { | |
6285 | nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, 0); | |
6286 | } | |
6287 | ||
6288 | lck_mtx_lock(&nmp->nm_lock); | |
6289 | ||
6290 | timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO; | |
6291 | if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */ | |
6292 | timeoutmask &= ~NFSSTA_JUKEBOXTIMEO; | |
6293 | } | |
6294 | wasunresponsive = (nmp->nm_state & timeoutmask); | |
6295 | ||
6296 | /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */ | |
6297 | softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE)); | |
6298 | ||
6299 | if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) { | |
6300 | nmp->nm_state &= ~NFSSTA_TIMEO; | |
6301 | } | |
6302 | if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) { | |
6303 | nmp->nm_state &= ~NFSSTA_LOCKTIMEO; | |
6304 | } | |
6305 | if ((flags & NFSSTA_JUKEBOXTIMEO) && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) { | |
6306 | nmp->nm_state &= ~NFSSTA_JUKEBOXTIMEO; | |
6307 | } | |
6308 | ||
6309 | unresponsive = (nmp->nm_state & timeoutmask); | |
6310 | ||
6311 | nmp->nm_deadto_start = 0; | |
6312 | nmp->nm_curdeadtimeout = nmp->nm_deadtimeout; | |
6313 | nmp->nm_state &= ~NFSSTA_SQUISHY; | |
6314 | lck_mtx_unlock(&nmp->nm_lock); | |
6315 | ||
6316 | if (softnobrowse) { | |
6317 | do_vfs_signal = 0; | |
6318 | } else { | |
6319 | do_vfs_signal = (wasunresponsive && !unresponsive); | |
6320 | } | |
6321 | if (do_vfs_signal) { | |
6322 | vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 1); | |
6323 | } | |
6324 | } | |
6325 | ||
6326 | ||
6327 | #endif /* NFSCLIENT */ | |
6328 | ||
6329 | #if NFSSERVER | |
6330 | ||
6331 | /* | |
6332 | * Generate the rpc reply header | |
6333 | * siz arg. is used to decide if adding a cluster is worthwhile | |
6334 | */ | |
int
nfsrv_rephead(
	struct nfsrv_descript *nd,
	__unused struct nfsrv_sock *slp,
	struct nfsm_chain *nmrepp,
	size_t siz)
{
	mbuf_t mrep;
	u_int32_t *tl;
	struct nfsm_chain nmrep;
	int err, error;

	/* err is the request's result status to be reported to the client */
	err = nd->nd_repstat;
	if (err && (nd->nd_vers == NFS_VER2)) {
		/* NFSv2 error replies carry no payload beyond the status */
		siz = 0;
	}

	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= nfs_mbuf_minclsize) {
		error = mbuf_getpacket(MBUF_WAITOK, &mrep);
	} else {
		error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mrep);
	}
	if (error) {
		/* unable to allocate packet */
		/* XXX should we keep statistics for these errors? */
		return error;
	}
	if (siz < nfs_mbuf_minclsize) {
		/* leave space for lower level headers */
		tl = mbuf_data(mrep);
		tl += 80 / sizeof(*tl); /* XXX max_hdr? XXX */
		mbuf_setdata(mrep, tl, 6 * NFSX_UNSIGNED);
	}
	nfsm_chain_init(&nmrep, mrep);
	/* transaction ID echoed back to the client, then message type = reply */
	nfsm_chain_add_32(error, &nmrep, nd->nd_retxid);
	nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		/* rejected reply: RPC version mismatch or authentication failure */
		nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
		if (err & NFSERR_AUTHERR) {
			nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
			nfsm_chain_add_32(error, &nmrep, (err & ~NFSERR_AUTHERR));
		} else {
			nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
			/* lowest and highest RPC versions supported (both 2) */
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
		}
	} else {
		/* reply status */
		nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
		if (nd->nd_gss_context != NULL) {
			/* RPCSEC_GSS verifier */
			error = nfs_gss_svc_verf_put(nd, &nmrep);
			if (error) {
				nfsm_chain_add_32(error, &nmrep, RPC_SYSTEM_ERR);
				goto done;
			}
		} else {
			/* RPCAUTH_NULL verifier */
			nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
			nfsm_chain_add_32(error, &nmrep, 0);
		}
		/* accepted status */
		switch (err) {
		case EPROGUNAVAIL:
			nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
			/* XXX hard coded versions? */
			nfsm_chain_add_32(error, &nmrep, NFS_VER2);
			nfsm_chain_add_32(error, &nmrep, NFS_VER3);
			break;
		case EPROCUNAVAIL:
			nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
			break;
		default:
			/* RPC-level success; NFS-level status (if any) follows */
			nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
			if (nd->nd_gss_context != NULL) {
				error = nfs_gss_svc_prepare_reply(nd, &nmrep);
			}
			if (err != NFSERR_RETVOID) {
				/* map the internal errno to a protocol error status (0 = OK) */
				nfsm_chain_add_32(error, &nmrep,
				    (err ? nfsrv_errmap(nd, err) : 0));
			}
			break;
		}
	}

done:
	nfsm_chain_build_done(error, &nmrep);
	if (error) {
		/* error composing reply header */
		/* XXX should we keep statistics for these errors? */
		mbuf_freem(mrep);
		return error;
	}

	/* hand the constructed chain back to the caller */
	*nmrepp = nmrep;
	if ((err != 0) && (err != NFSERR_RETVOID)) {
		OSAddAtomic64(1, &nfsstats.srvrpc_errs);
	}
	return 0;
}
6446 | ||
6447 | /* | |
6448 | * The nfs server send routine. | |
6449 | * | |
6450 | * - return EINTR or ERESTART if interrupted by a signal | |
6451 | * - return EPIPE if a connection is lost for connection based sockets (TCP...) | |
6452 | * - do any cleanup required by recoverable socket errors (???) | |
6453 | */ | |
6454 | int | |
6455 | nfsrv_send(struct nfsrv_sock *slp, mbuf_t nam, mbuf_t top) | |
6456 | { | |
6457 | int error; | |
6458 | socket_t so = slp->ns_so; | |
6459 | struct sockaddr *sendnam; | |
6460 | struct msghdr msg; | |
6461 | ||
6462 | bzero(&msg, sizeof(msg)); | |
6463 | if (nam && !sock_isconnected(so) && (slp->ns_sotype != SOCK_STREAM)) { | |
6464 | if ((sendnam = mbuf_data(nam))) { | |
6465 | msg.msg_name = (caddr_t)sendnam; | |
6466 | msg.msg_namelen = sendnam->sa_len; | |
6467 | } | |
6468 | } | |
6469 | if (NFS_IS_DBG(NFS_FAC_SRV, 15)) { | |
6470 | nfs_dump_mbuf(__func__, __LINE__, "nfsrv_send\n", top); | |
6471 | } | |
6472 | error = sock_sendmbuf(so, &msg, top, 0, NULL); | |
6473 | if (!error) { | |
6474 | return 0; | |
6475 | } | |
6476 | log(LOG_INFO, "nfsd send error %d\n", error); | |
6477 | ||
6478 | if ((error == EWOULDBLOCK) && (slp->ns_sotype == SOCK_STREAM)) { | |
6479 | error = EPIPE; /* zap TCP sockets if they time out on send */ | |
6480 | } | |
6481 | /* Handle any recoverable (soft) socket errors here. (???) */ | |
6482 | if (error != EINTR && error != ERESTART && error != EIO && | |
6483 | error != EWOULDBLOCK && error != EPIPE) { | |
6484 | error = 0; | |
6485 | } | |
6486 | ||
6487 | return error; | |
6488 | } | |
6489 | ||
6490 | /* | |
6491 | * Socket upcall routine for the nfsd sockets. | |
6492 | * The caddr_t arg is a pointer to the "struct nfsrv_sock". | |
6493 | * Essentially do as much as possible non-blocking, else punt and it will | |
6494 | * be called with MBUF_WAITOK from an nfsd. | |
6495 | */ | |
6496 | void | |
6497 | nfsrv_rcv(socket_t so, void *arg, int waitflag) | |
6498 | { | |
6499 | struct nfsrv_sock *slp = arg; | |
6500 | ||
6501 | if (!nfsd_thread_count || !(slp->ns_flag & SLP_VALID)) { | |
6502 | return; | |
6503 | } | |
6504 | ||
6505 | lck_rw_lock_exclusive(&slp->ns_rwlock); | |
6506 | nfsrv_rcv_locked(so, slp, waitflag); | |
6507 | /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */ | |
6508 | } | |
/*
 * Receive data from an nfsd socket and queue complete RPC records.
 * Called with ns_rwlock held exclusive; when waitflag is MBUF_DONTWAIT
 * the lock is dropped before returning (see "dorecs" below).
 */
void
nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag)
{
	mbuf_t m, mp, mhck, m2;
	int ns_flag = 0, error;
	struct msghdr msg;
	size_t bytes_read;

	if ((slp->ns_flag & SLP_VALID) == 0) {
		/* socket no longer valid; just honor the locking contract */
		if (waitflag == MBUF_DONTWAIT) {
			lck_rw_done(&slp->ns_rwlock);
		}
		return;
	}

#ifdef notdef
	/*
	 * Define this to test for nfsds handling this under heavy load.
	 */
	if (waitflag == MBUF_DONTWAIT) {
		ns_flag = SLP_NEEDQ;
		goto dorecs;
	}
#endif
	if (slp->ns_sotype == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an(other) nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (slp->ns_rec) {
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		bytes_read = 1000000000;
		error = sock_receivembuf(so, NULL, &mp, MSG_DONTWAIT, &bytes_read);
		if (error || mp == NULL) {
			/* EWOULDBLOCK means no data yet; anything else means disconnect */
			if (error == EWOULDBLOCK) {
				ns_flag = (waitflag == MBUF_DONTWAIT) ? SLP_NEEDQ : 0;
			} else {
				ns_flag = SLP_DISCONN;
			}
			goto dorecs;
		}
		/* append the received data to the raw (unparsed) stream chain */
		m = mp;
		if (slp->ns_rawend) {
			if ((error = mbuf_setnext(slp->ns_rawend, m))) {
				panic("nfsrv_rcv: mbuf_setnext failed %d\n", error);
			}
			slp->ns_cc += bytes_read;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = bytes_read;
		}
		/* walk to the last mbuf to maintain the raw-chain tail pointer */
		while ((m2 = mbuf_next(m))) {
			m = m2;
		}
		slp->ns_rawend = m;

		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag);
		if (error) {
			/* EPERM means a bad record mark: drop the connection */
			if (error == EPERM) {
				ns_flag = SLP_DISCONN;
			} else {
				ns_flag = SLP_NEEDQ;
			}
		}
	} else {
		/* datagram socket: each receive yields one complete record */
		struct sockaddr_storage nam;

		if (slp->ns_reccnt >= nfsrv_sock_max_rec_queue_length) {
			/* already have max # RPC records queued on this socket */
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		bzero(&msg, sizeof(msg));
		msg.msg_name = (caddr_t)&nam;
		msg.msg_namelen = sizeof(nam);

		do {
			bytes_read = 1000000000;
			error = sock_receivembuf(so, &msg, &mp, MSG_DONTWAIT | MSG_NEEDSA, &bytes_read);
			if (mp) {
				/* prepend the sender's address as a MBUF_TYPE_SONAME mbuf */
				if (msg.msg_name && (mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &mhck) == 0)) {
					mbuf_setlen(mhck, nam.ss_len);
					bcopy(&nam, mbuf_data(mhck), nam.ss_len);
					m = mhck;
					if (mbuf_setnext(m, mp)) {
						/* trouble... just drop it */
						printf("nfsrv_rcv: mbuf_setnext failed\n");
						mbuf_free(mhck);
						m = mp;
					}
				} else {
					m = mp;
				}
				/* append this record to the socket's record queue */
				if (slp->ns_recend) {
					mbuf_setnextpkt(slp->ns_recend, m);
				} else {
					slp->ns_rec = m;
					slp->ns_flag |= SLP_DOREC;
				}
				slp->ns_recend = m;
				mbuf_setnextpkt(m, NULL);
				slp->ns_reccnt++;
			}
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (ns_flag) {
		slp->ns_flag |= ns_flag;
	}
	if (waitflag == MBUF_DONTWAIT) {
		int wake = (slp->ns_flag & SLP_WORKTODO);
		lck_rw_done(&slp->ns_rwlock);
		if (wake && nfsd_thread_count) {
			/* wake an nfsd thread to service the queued work */
			lck_mtx_lock(nfsd_mutex);
			nfsrv_wakenfsd(slp);
			lck_mtx_unlock(nfsd_mutex);
		}
	}
}
6643 | ||
6644 | /* | |
6645 | * Try and extract an RPC request from the mbuf data list received on a | |
6646 | * stream socket. The "waitflag" argument indicates whether or not it | |
6647 | * can sleep. | |
6648 | */ | |
int
nfsrv_getstream(struct nfsrv_sock *slp, int waitflag)
{
	mbuf_t m;
	char *cp1, *cp2, *mdata;
	int len, mlen, error;
	mbuf_t om, m2, recm;
	u_int32_t recmark;

	/* SLP_GETSTREAM serializes stream parsing for this socket */
	if (slp->ns_flag & SLP_GETSTREAM) {
		panic("nfs getstream");
	}
	slp->ns_flag |= SLP_GETSTREAM;
	for (;;) {
		if (slp->ns_reclen == 0) {
			/* need a new 4-byte RPC record mark (RFC 5531 record marking) */
			if (slp->ns_cc < NFSX_UNSIGNED) {
				/* not enough buffered data for the record mark yet */
				slp->ns_flag &= ~SLP_GETSTREAM;
				return 0;
			}
			m = slp->ns_raw;
			mdata = mbuf_data(m);
			mlen = mbuf_len(m);
			if (mlen >= NFSX_UNSIGNED) {
				/* record mark is contained within the first mbuf */
				bcopy(mdata, (caddr_t)&recmark, NFSX_UNSIGNED);
				mdata += NFSX_UNSIGNED;
				mlen -= NFSX_UNSIGNED;
				mbuf_setdata(m, mdata, mlen);
			} else {
				/* record mark straddles mbufs; gather it byte by byte */
				cp1 = (caddr_t)&recmark;
				cp2 = mdata;
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (mlen == 0) {
						m = mbuf_next(m);
						cp2 = mbuf_data(m);
						mlen = mbuf_len(m);
					}
					*cp1++ = *cp2++;
					mlen--;
					mbuf_setdata(m, cp2, mlen);
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			/* low 31 bits: fragment length; high bit: last fragment of record */
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000) {
				slp->ns_flag |= SLP_LASTFRAG;
			} else {
				slp->ns_flag &= ~SLP_LASTFRAG;
			}
			if (slp->ns_reclen <= 0 || slp->ns_reclen > NFS_MAXPACKET) {
				/* implausible fragment length: protocol error, caller disconnects */
				slp->ns_flag &= ~SLP_GETSTREAM;
				return EPERM;
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0. Linux sometimes
		 * generates 0-length RPCs
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			/* buffered data is exactly one fragment: take the whole raw chain */
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = NULL;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			/* extra data buffered: split the raw chain at the fragment boundary */
			len = 0;
			m = slp->ns_raw;
			mlen = mbuf_len(m);
			mdata = mbuf_data(m);
			om = NULL;
			while (len < slp->ns_reclen) {
				if ((len + mlen) > slp->ns_reclen) {
					/* boundary falls inside this mbuf: copy out the head portion */
					if (mbuf_copym(m, 0, slp->ns_reclen - len, waitflag, &m2)) {
						slp->ns_flag &= ~SLP_GETSTREAM;
						return EWOULDBLOCK;
					}
					if (om) {
						if (mbuf_setnext(om, m2)) {
							/* trouble... just drop it */
							printf("nfsrv_getstream: mbuf_setnext failed\n");
							mbuf_freem(m2);
							slp->ns_flag &= ~SLP_GETSTREAM;
							return EWOULDBLOCK;
						}
						recm = slp->ns_raw;
					} else {
						recm = m2;
					}
					/* advance past the copied bytes in the remaining raw data */
					mdata += slp->ns_reclen - len;
					mlen -= slp->ns_reclen - len;
					mbuf_setdata(m, mdata, mlen);
					len = slp->ns_reclen;
				} else if ((len + mlen) == slp->ns_reclen) {
					/* boundary falls exactly at the end of this mbuf */
					om = m;
					len += mlen;
					m = mbuf_next(m);
					recm = slp->ns_raw;
					if (mbuf_setnext(om, NULL)) {
						printf("nfsrv_getstream: mbuf_setnext failed 2\n");
						slp->ns_flag &= ~SLP_GETSTREAM;
						return EWOULDBLOCK;
					}
					mlen = mbuf_len(m);
					mdata = mbuf_data(m);
				} else {
					/* entire mbuf belongs to this fragment; keep walking */
					om = m;
					len += mlen;
					m = mbuf_next(m);
					mlen = mbuf_len(m);
					mdata = mbuf_data(m);
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			/* fragment not completely buffered yet; wait for more data */
			slp->ns_flag &= ~SLP_GETSTREAM;
			return 0;
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		if (slp->ns_frag == NULL) {
			slp->ns_frag = recm;
		} else {
			/* append this fragment to the end of the partial record */
			m = slp->ns_frag;
			while ((m2 = mbuf_next(m))) {
				m = m2;
			}
			if ((error = mbuf_setnext(m, recm))) {
				panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error);
			}
		}
		if (slp->ns_flag & SLP_LASTFRAG) {
			/* record complete: move it to the record queue for the nfsds */
			if (slp->ns_recend) {
				mbuf_setnextpkt(slp->ns_recend, slp->ns_frag);
			} else {
				slp->ns_rec = slp->ns_frag;
				slp->ns_flag |= SLP_DOREC;
			}
			slp->ns_recend = slp->ns_frag;
			slp->ns_frag = NULL;
		}
	}
}
6797 | ||
6798 | /* | |
6799 | * Parse an RPC header. | |
6800 | */ | |
6801 | int | |
6802 | nfsrv_dorec( | |
6803 | struct nfsrv_sock *slp, | |
6804 | struct nfsd *nfsd, | |
6805 | struct nfsrv_descript **ndp) | |
6806 | { | |
6807 | mbuf_t m; | |
6808 | mbuf_t nam; | |
6809 | struct nfsrv_descript *nd; | |
6810 | int error = 0; | |
6811 | ||
6812 | *ndp = NULL; | |
6813 | if (!(slp->ns_flag & (SLP_VALID | SLP_DOREC)) || (slp->ns_rec == NULL)) { | |
6814 | return ENOBUFS; | |
6815 | } | |
6816 | MALLOC_ZONE(nd, struct nfsrv_descript *, | |
6817 | sizeof(struct nfsrv_descript), M_NFSRVDESC, M_WAITOK); | |
6818 | if (!nd) { | |
6819 | return ENOMEM; | |
6820 | } | |
6821 | m = slp->ns_rec; | |
6822 | slp->ns_rec = mbuf_nextpkt(m); | |
6823 | if (slp->ns_rec) { | |
6824 | mbuf_setnextpkt(m, NULL); | |
6825 | } else { | |
6826 | slp->ns_flag &= ~SLP_DOREC; | |
6827 | slp->ns_recend = NULL; | |
6828 | } | |
6829 | slp->ns_reccnt--; | |
6830 | if (mbuf_type(m) == MBUF_TYPE_SONAME) { | |
6831 | nam = m; | |
6832 | m = mbuf_next(m); | |
6833 | if ((error = mbuf_setnext(nam, NULL))) { | |
6834 | panic("nfsrv_dorec: mbuf_setnext failed %d\n", error); | |
6835 | } | |
6836 | } else { | |
6837 | nam = NULL; | |
6838 | } | |
6839 | nd->nd_nam2 = nam; | |
6840 | nfsm_chain_dissect_init(error, &nd->nd_nmreq, m); | |
6841 | if (!error) { | |
6842 | error = nfsrv_getreq(nd); | |
6843 | } | |
6844 | if (error) { | |
6845 | if (nam) { | |
6846 | mbuf_freem(nam); | |
6847 | } | |
6848 | if (nd->nd_gss_context) { | |
6849 | nfs_gss_svc_ctx_deref(nd->nd_gss_context); | |
6850 | } | |
6851 | FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC); | |
6852 | return error; | |
6853 | } | |
6854 | nd->nd_mrep = NULL; | |
6855 | *ndp = nd; | |
6856 | nfsd->nfsd_nd = nd; | |
6857 | return 0; | |
6858 | } | |
6859 | ||
/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 *
 * On success returns 0 with nd->nd_procnum/nd->nd_vers set and, for
 * AUTH_SYS requests, nd->nd_cr holding a referenced credential.
 * Protocol-level rejections also return 0 but set nd->nd_repstat and
 * force nd->nd_procnum to NFSPROC_NOOP so a reply is still generated.
 * A non-zero return (via nfsmout) means the request should be dropped;
 * the mbuf chain has been cleaned up and any credential released.
 *
 * NOTE: the nfsm_chain_get_32/adv macros accumulate into 'error' and
 * skip their work once it is set; nfsmout_if() jumps to nfsmout on error.
 */
int
nfsrv_getreq(struct nfsrv_descript *nd)
{
	struct nfsm_chain *nmreq;
	int len, i;
	u_int32_t nfsvers, auth_type;
	int error = 0;
	uid_t user_id;
	gid_t group_id;
	int ngroups;
	uint32_t val;

	/* start with a clean descriptor: no credential, no GSS state */
	nd->nd_cr = NULL;
	nd->nd_gss_context = NULL;
	nd->nd_gss_seqnum = 0;
	nd->nd_gss_mb = NULL;

	/* -2 is the traditional "nobody" uid/gid fallback */
	user_id = group_id = -2;
	val = auth_type = len = 0;

	nmreq = &nd->nd_nmreq;
	nfsm_chain_get_32(error, nmreq, nd->nd_retxid); // XID
	nfsm_chain_get_32(error, nmreq, val); // RPC Call
	if (!error && (val != RPC_CALL)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nd->nd_repstat = 0;
	nfsm_chain_get_32(error, nmreq, val); // RPC Version
	nfsmout_if(error);
	if (val != RPC_VER2) {
		/* reject, but still reply: MSG_DENIED/RPC_MISMATCH */
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	nfsm_chain_get_32(error, nmreq, val); // RPC Program Number
	nfsmout_if(error);
	if (val != NFS_PROG) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	nfsm_chain_get_32(error, nmreq, nfsvers);// NFS Version Number
	nfsmout_if(error);
	if ((nfsvers < NFS_VER2) || (nfsvers > NFS_VER3)) {
		/* only NFSv2 and NFSv3 are served here */
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	nd->nd_vers = nfsvers;
	nfsm_chain_get_32(error, nmreq, nd->nd_procnum);// NFS Procedure Number
	nfsmout_if(error);
	if ((nd->nd_procnum >= NFS_NPROCS) ||
	    ((nd->nd_vers == NFS_VER2) && (nd->nd_procnum > NFSV2PROC_STATFS))) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	if (nfsvers != NFS_VER3) {
		/* map the v2 procedure number to its v3 equivalent */
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	}
	nfsm_chain_get_32(error, nmreq, auth_type); // Auth Flavor
	nfsm_chain_get_32(error, nmreq, len); // Auth Length
	if (!error && (len < 0 || len > RPCAUTH_MAXSIZ)) {
		error = EBADRPC;
	}
	nfsmout_if(error);

	/* Handle authentication */
	if (auth_type == RPCAUTH_SYS) {
		struct posix_cred temp_pcred;
		/* NULL procedure needs no credential at all */
		if (nd->nd_procnum == NFSPROC_NULL) {
			return 0;
		}
		nd->nd_sec = RPCAUTH_SYS;
		nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // skip stamp
		nfsm_chain_get_32(error, nmreq, len); // hostname length
		if (len < 0 || len > NFS_MAXNAMLEN) {
			error = EBADRPC;
		}
		nfsm_chain_adv(error, nmreq, nfsm_rndup(len)); // skip hostname
		nfsmout_if(error);

		/* create a temporary credential using the bits from the wire */
		bzero(&temp_pcred, sizeof(temp_pcred));
		nfsm_chain_get_32(error, nmreq, user_id);
		nfsm_chain_get_32(error, nmreq, group_id);
		temp_pcred.cr_groups[0] = group_id;
		nfsm_chain_get_32(error, nmreq, len); // extra GID count
		if ((len < 0) || (len > RPCAUTH_UNIXGIDS)) {
			error = EBADRPC;
		}
		nfsmout_if(error);
		/* copy up to NGROUPS-1 extra gids; consume (but drop) the rest */
		for (i = 1; i <= len; i++) {
			if (i < NGROUPS) {
				nfsm_chain_get_32(error, nmreq, temp_pcred.cr_groups[i]);
			} else {
				nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED);
			}
		}
		nfsmout_if(error);
		/* +1 accounts for the primary gid stored in cr_groups[0] */
		ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
		if (ngroups > 1) {
			nfsrv_group_sort(&temp_pcred.cr_groups[0], ngroups);
		}
		nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
		nfsm_chain_get_32(error, nmreq, len); // verifier length
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			error = EBADRPC;
		}
		if (len > 0) {
			nfsm_chain_adv(error, nmreq, nfsm_rndup(len)); // skip verifier body
		}

		/* request creation of a real credential */
		temp_pcred.cr_uid = user_id;
		temp_pcred.cr_ngroups = ngroups;
		nd->nd_cr = posix_cred_create(&temp_pcred);
		if (nd->nd_cr == NULL) {
			nd->nd_repstat = ENOMEM;
			nd->nd_procnum = NFSPROC_NOOP;
			return 0;
		}
	} else if (auth_type == RPCSEC_GSS) {
		/* hand the remainder of the header to the GSS layer */
		error = nfs_gss_svc_cred_get(nd, nmreq);
		if (error) {
			if (error == EINVAL) {
				goto nfsmout; // drop the request
			}
			nd->nd_repstat = error;
			nd->nd_procnum = NFSPROC_NOOP;
			return 0;
		}
	} else {
		if (nd->nd_procnum == NFSPROC_NULL) { // assume it's AUTH_NONE
			return 0;
		}
		/* unsupported flavor: reply with an auth error */
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	return 0;
nfsmout:
	/* drop path: release any credential and free the request mbufs */
	if (IS_VALID_CRED(nd->nd_cr)) {
		kauth_cred_unref(&nd->nd_cr);
	}
	nfsm_chain_cleanup(nmreq);
	return error;
}
7014 | ||
7015 | /* | |
7016 | * Search for a sleeping nfsd and wake it up. | |
7017 | * SIDE EFFECT: If none found, make sure the socket is queued up so that one | |
7018 | * of the running nfsds will go look for the work in the nfsrv_sockwait list. | |
7019 | * Note: Must be called with nfsd_mutex held. | |
7020 | */ | |
7021 | void | |
7022 | nfsrv_wakenfsd(struct nfsrv_sock *slp) | |
7023 | { | |
7024 | struct nfsd *nd; | |
7025 | ||
7026 | if ((slp->ns_flag & SLP_VALID) == 0) { | |
7027 | return; | |
7028 | } | |
7029 | ||
7030 | lck_rw_lock_exclusive(&slp->ns_rwlock); | |
7031 | /* if there's work to do on this socket, make sure it's queued up */ | |
7032 | if ((slp->ns_flag & SLP_WORKTODO) && !(slp->ns_flag & SLP_QUEUED)) { | |
7033 | TAILQ_INSERT_TAIL(&nfsrv_sockwait, slp, ns_svcq); | |
7034 | slp->ns_flag |= SLP_WAITQ; | |
7035 | } | |
7036 | lck_rw_done(&slp->ns_rwlock); | |
7037 | ||
7038 | /* wake up a waiting nfsd, if possible */ | |
7039 | nd = TAILQ_FIRST(&nfsd_queue); | |
7040 | if (!nd) { | |
7041 | return; | |
7042 | } | |
7043 | ||
7044 | TAILQ_REMOVE(&nfsd_queue, nd, nfsd_queue); | |
7045 | nd->nfsd_flag &= ~NFSD_WAITING; | |
7046 | wakeup(nd); | |
7047 | } | |
7048 | ||
7049 | #endif /* NFSSERVER */ |